Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/compiler/OWNERS b/src/compiler/OWNERS
new file mode 100644
index 0000000..1257e23
--- /dev/null
+++ b/src/compiler/OWNERS
@@ -0,0 +1,7 @@
+set noparent
+
+bmeurer@chromium.org
+jarin@chromium.org
+mstarzinger@chromium.org
+mtrofin@chromium.org
+titzer@chromium.org
diff --git a/src/compiler/STYLE b/src/compiler/STYLE
new file mode 100644
index 0000000..ae41e3f
--- /dev/null
+++ b/src/compiler/STYLE
@@ -0,0 +1,29 @@
+Compiler Coding Style
+=====================
+
+Coding style for the TurboFan compiler generally follows the Google C++ Style
+Guide and the Chromium Coding Style. The notes below are usually just extensions
+beyond what the Google style guide already says. If this document doesn't
+mention a rule, follow the Google C++ style.
+
+
+TODOs
+-----
+We use the following convention for putting TODOs into the code:
+
+ * A TODO(turbofan) implies a performance improvement opportunity.
+ * A TODO(name) implies an incomplete implementation.
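+
+Illustrative examples (hypothetical comments, not code from this tree):
+
+  // TODO(turbofan): Fuse this check into the preceding load.
+  // TODO(name): Deoptimization is not handled here yet.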
+
+
+Use of C++11 auto keyword
+-------------------------
+Use auto to avoid type names that are just clutter. Continue to use manifest
+type declarations when they help readability. Never use auto for anything but
+local variables, and only where the type is obvious from context:
+
+ for (auto block : x->blocks()) // clearly a Block of some kind
+ for (auto instr : x->instructions()) // clearly an Instruction of some kind
+
+ for (auto b : x->predecessors()) // less clear, better to make it explicit
+ for (BasicBlock* b : x->predecessors()) // now clear
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index 8c8e530..ebd2789 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -3,6 +3,11 @@
// found in the LICENSE file.
#include "src/compiler/access-builder.h"
+
+#include "src/contexts.h"
+#include "src/frames.h"
+#include "src/heap/heap.h"
+#include "src/type-cache.h"
#include "src/types-inl.h"
namespace v8 {
@@ -11,64 +16,282 @@
// static
FieldAccess AccessBuilder::ForMap() {
- return {kTaggedBase, HeapObject::kMapOffset, MaybeHandle<Name>(), Type::Any(),
- kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
+ MaybeHandle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForHeapNumberValue() {
+ FieldAccess access = {kTaggedBase, HeapNumber::kValueOffset,
+ MaybeHandle<Name>(), TypeCache::Get().kFloat64,
+ MachineType::Float64()};
+ return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectProperties() {
- return {kTaggedBase, JSObject::kPropertiesOffset, MaybeHandle<Name>(),
- Type::Any(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
+ MaybeHandle<Name>(), Type::Internal(),
+ MachineType::AnyTagged()};
+ return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
- return {kTaggedBase, JSObject::kElementsOffset, MaybeHandle<Name>(),
- Type::Internal(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
+ MaybeHandle<Name>(), Type::Internal(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSObjectInObjectProperty(Handle<Map> map,
+ int index) {
+ int const offset = map->GetInObjectPropertyOffset(index);
+ FieldAccess access = {kTaggedBase, offset, MaybeHandle<Name>(),
+ Type::Tagged(), MachineType::AnyTagged()};
+ return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionContext() {
- return {kTaggedBase, JSFunction::kContextOffset, MaybeHandle<Name>(),
- Type::Internal(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, JSFunction::kContextOffset,
+ MaybeHandle<Name>(), Type::Internal(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
+ FieldAccess access = {kTaggedBase, JSFunction::kSharedFunctionInfoOffset,
+ Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
+ TypeCache const& type_cache = TypeCache::Get();
+ FieldAccess access = {kTaggedBase, JSArray::kLengthOffset, Handle<Name>(),
+ type_cache.kJSArrayLengthType,
+ MachineType::AnyTagged()};
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ access.type = type_cache.kFixedDoubleArrayLengthType;
+ } else if (IsFastElementsKind(elements_kind)) {
+ access.type = type_cache.kFixedArrayLengthType;
+ }
+ return access;
}
// static
FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
- return {kTaggedBase, JSArrayBuffer::kBackingStoreOffset, MaybeHandle<Name>(),
- Type::UntaggedPointer(), kMachPtr};
+ FieldAccess access = {kTaggedBase, JSArrayBuffer::kBackingStoreOffset,
+ MaybeHandle<Name>(), Type::UntaggedPointer(),
+ MachineType::Pointer()};
+ return access;
}
// static
-FieldAccess AccessBuilder::ForExternalArrayPointer() {
- return {kTaggedBase, ExternalArray::kExternalPointerOffset,
- MaybeHandle<Name>(), Type::UntaggedPointer(), kMachPtr};
+FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
+ FieldAccess access = {kTaggedBase, JSArrayBuffer::kBitFieldOffset,
+ MaybeHandle<Name>(), TypeCache::Get().kInt8,
+ MachineType::Int8()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
+ FieldAccess access = {kTaggedBase, JSArrayBufferView::kBufferOffset,
+ MaybeHandle<Name>(), Type::TaggedPointer(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
+ FieldAccess access = {
+ kTaggedBase, JSDate::kValueOffset + index * kPointerSize,
+ MaybeHandle<Name>(), Type::Number(), MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSIteratorResultDone() {
+ FieldAccess access = {kTaggedBase, JSIteratorResult::kDoneOffset,
+ MaybeHandle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSIteratorResultValue() {
+ FieldAccess access = {kTaggedBase, JSIteratorResult::kValueOffset,
+ MaybeHandle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSRegExpFlags() {
+ FieldAccess access = {kTaggedBase, JSRegExp::kFlagsOffset,
+ MaybeHandle<Name>(), Type::Tagged(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSRegExpSource() {
+ FieldAccess access = {kTaggedBase, JSRegExp::kSourceOffset,
+ MaybeHandle<Name>(), Type::Tagged(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForFixedArrayLength() {
+ FieldAccess access = {
+ kTaggedBase, FixedArray::kLengthOffset, MaybeHandle<Name>(),
+ TypeCache::Get().kFixedArrayLengthType, MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
+ FieldAccess access = {kTaggedBase, DescriptorArray::kEnumCacheOffset,
+ Handle<Name>(), Type::TaggedPointer(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
+ FieldAccess access = {
+ kTaggedBase, DescriptorArray::kEnumCacheBridgeCacheOffset, Handle<Name>(),
+ Type::TaggedPointer(), MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForMapBitField() {
+ FieldAccess access = {kTaggedBase, Map::kBitFieldOffset, Handle<Name>(),
+ TypeCache::Get().kUint8, MachineType::Uint8()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForMapBitField3() {
+ FieldAccess access = {kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
+ TypeCache::Get().kInt32, MachineType::Int32()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForMapDescriptors() {
+ FieldAccess access = {kTaggedBase, Map::kDescriptorsOffset, Handle<Name>(),
+ Type::TaggedPointer(), MachineType::AnyTagged()};
+ return access;
}
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
- return {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
- Type::UntaggedUnsigned8(), kMachUint8};
+ FieldAccess access = {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
+ TypeCache::Get().kUint8, MachineType::Uint8()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForMapPrototype() {
+ FieldAccess access = {kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
+ Type::TaggedPointer(), MachineType::AnyTagged()};
+ return access;
}
// static
FieldAccess AccessBuilder::ForStringLength() {
- return {kTaggedBase, String::kLengthOffset, Handle<Name>(),
- Type::SignedSmall(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, String::kLengthOffset, Handle<Name>(),
+ TypeCache::Get().kStringLengthType,
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
+ FieldAccess access = {kTaggedBase, JSGlobalObject::kGlobalProxyOffset,
+ Handle<Name>(), Type::Receiver(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
+ FieldAccess access = {kTaggedBase, JSGlobalObject::kNativeContextOffset,
+ Handle<Name>(), Type::Internal(),
+ MachineType::AnyTagged()};
+ return access;
}
// static
FieldAccess AccessBuilder::ForValue() {
- return {kTaggedBase, JSValue::kValueOffset, Handle<Name>(), Type::Any(),
- kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, JSValue::kValueOffset, Handle<Name>(),
+ Type::Any(), MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForArgumentsLength() {
+ int offset =
+ JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
+ FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForArgumentsCallee() {
+ int offset =
+ JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
+ FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForFixedArraySlot(size_t index) {
+ int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
+ FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
}
@@ -77,13 +300,47 @@
int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
DCHECK_EQ(offset,
Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
- return {kTaggedBase, offset, Handle<Name>(), Type::Any(), kMachAnyTagged};
+ FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForPropertyCellValue() {
+ return ForPropertyCellValue(Type::Tagged());
+}
+
+
+// static
+FieldAccess AccessBuilder::ForPropertyCellValue(Type* type) {
+ FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
+ type, MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector() {
+ FieldAccess access = {kTaggedBase, SharedFunctionInfo::kFeedbackVectorOffset,
+ Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+ return access;
}
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
- return {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged};
+ ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Tagged(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+ElementAccess AccessBuilder::ForFixedDoubleArrayElement() {
+ ElementAccess access = {kTaggedBase, FixedDoubleArray::kHeaderSize,
+ TypeCache::Get().kFloat64, MachineType::Float64()};
+ return access;
}
@@ -93,26 +350,59 @@
BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase;
int header_size = is_external ? 0 : FixedTypedArrayBase::kDataOffset;
switch (type) {
- case kExternalInt8Array:
- return {taggedness, header_size, Type::Signed32(), kMachInt8};
+ case kExternalInt8Array: {
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int8()};
+ return access;
+ }
case kExternalUint8Array:
- case kExternalUint8ClampedArray:
- return {taggedness, header_size, Type::Unsigned32(), kMachUint8};
- case kExternalInt16Array:
- return {taggedness, header_size, Type::Signed32(), kMachInt16};
- case kExternalUint16Array:
- return {taggedness, header_size, Type::Unsigned32(), kMachUint16};
- case kExternalInt32Array:
- return {taggedness, header_size, Type::Signed32(), kMachInt32};
- case kExternalUint32Array:
- return {taggedness, header_size, Type::Unsigned32(), kMachUint32};
- case kExternalFloat32Array:
- return {taggedness, header_size, Type::Number(), kMachFloat32};
- case kExternalFloat64Array:
- return {taggedness, header_size, Type::Number(), kMachFloat64};
+ case kExternalUint8ClampedArray: {
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint8()};
+ return access;
+ }
+ case kExternalInt16Array: {
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int16()};
+ return access;
+ }
+ case kExternalUint16Array: {
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint16()};
+ return access;
+ }
+ case kExternalInt32Array: {
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int32()};
+ return access;
+ }
+ case kExternalUint32Array: {
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint32()};
+ return access;
+ }
+ case kExternalFloat32Array: {
+ ElementAccess access = {taggedness, header_size, Type::Number(),
+ MachineType::Float32()};
+ return access;
+ }
+ case kExternalFloat64Array: {
+ ElementAccess access = {taggedness, header_size, Type::Number(),
+ MachineType::Float64()};
+ return access;
+ }
}
UNREACHABLE();
- return {kUntaggedBase, 0, Type::None(), kMachNone};
+ ElementAccess access = {kUntaggedBase, 0, Type::None(), MachineType::None()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForStatsCounter() {
+ FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(),
+ TypeCache::Get().kInt32, MachineType::Int32()};
+ return access;
}
} // namespace compiler
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
index d6385e4..8375d37 100644
--- a/src/compiler/access-builder.h
+++ b/src/compiler/access-builder.h
@@ -12,47 +12,130 @@
namespace compiler {
// This access builder provides a set of static methods constructing commonly
-// used FieldAccess and ElementAccess descriptors. These descriptors server as
+// used FieldAccess and ElementAccess descriptors. These descriptors serve as
// parameters to simplified load/store operators.
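+//
+// For orientation, a typical consumer looks roughly like this (a sketch only;
+// it assumes the usual JSGraph helpers graph() and simplified() plus a
+// current effect/control chain in the surrounding reducer):
+//
+//   FieldAccess const access = AccessBuilder::ForMap();
+//   Node* map = graph()->NewNode(simplified()->LoadField(access), receiver,
+//                                effect, control);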
-class AccessBuilder FINAL : public AllStatic {
+class AccessBuilder final : public AllStatic {
public:
+ // ===========================================================================
+ // Access to heap object fields and elements (based on tagged pointer).
+
// Provides access to HeapObject::map() field.
static FieldAccess ForMap();
+ // Provides access to HeapNumber::value() field.
+ static FieldAccess ForHeapNumberValue();
+
// Provides access to JSObject::properties() field.
static FieldAccess ForJSObjectProperties();
// Provides access to JSObject::elements() field.
static FieldAccess ForJSObjectElements();
+ // Provides access to JSObject inobject property fields.
+ static FieldAccess ForJSObjectInObjectProperty(Handle<Map> map, int index);
+
// Provides access to JSFunction::context() field.
static FieldAccess ForJSFunctionContext();
+ // Provides access to JSFunction::shared() field.
+ static FieldAccess ForJSFunctionSharedFunctionInfo();
+
+ // Provides access to JSArray::length() field.
+ static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
+
// Provides access to JSArrayBuffer::backing_store() field.
static FieldAccess ForJSArrayBufferBackingStore();
- // Provides access to ExternalArray::external_pointer() field.
- static FieldAccess ForExternalArrayPointer();
+ // Provides access to JSArrayBuffer::bit_field() field.
+ static FieldAccess ForJSArrayBufferBitField();
+
+ // Provides access to JSArrayBufferView::buffer() field.
+ static FieldAccess ForJSArrayBufferViewBuffer();
+
+ // Provides access to JSDate fields.
+ static FieldAccess ForJSDateField(JSDate::FieldIndex index);
+
+ // Provides access to JSIteratorResult::done() field.
+ static FieldAccess ForJSIteratorResultDone();
+
+ // Provides access to JSIteratorResult::value() field.
+ static FieldAccess ForJSIteratorResultValue();
+
+ // Provides access to JSRegExp::flags() field.
+ static FieldAccess ForJSRegExpFlags();
+
+ // Provides access to JSRegExp::source() field.
+ static FieldAccess ForJSRegExpSource();
+
+ // Provides access to FixedArray::length() field.
+ static FieldAccess ForFixedArrayLength();
+
+ // Provides access to DescriptorArray::enum_cache() field.
+ static FieldAccess ForDescriptorArrayEnumCache();
+
+ // Provides access to DescriptorArray::enum_cache_bridge_cache() field.
+ static FieldAccess ForDescriptorArrayEnumCacheBridgeCache();
+
+ // Provides access to Map::bit_field() byte.
+ static FieldAccess ForMapBitField();
+
+ // Provides access to Map::bit_field3() field.
+ static FieldAccess ForMapBitField3();
+
+ // Provides access to Map::descriptors() field.
+ static FieldAccess ForMapDescriptors();
// Provides access to Map::instance_type() field.
static FieldAccess ForMapInstanceType();
+ // Provides access to Map::prototype() field.
+ static FieldAccess ForMapPrototype();
+
// Provides access to String::length() field.
static FieldAccess ForStringLength();
+ // Provides access to JSGlobalObject::global_proxy() field.
+ static FieldAccess ForJSGlobalObjectGlobalProxy();
+
+ // Provides access to JSGlobalObject::native_context() field.
+ static FieldAccess ForJSGlobalObjectNativeContext();
+
// Provides access to JSValue::value() field.
static FieldAccess ForValue();
- // Provides access Context slots.
+ // Provides access to arguments object fields.
+ static FieldAccess ForArgumentsLength();
+ static FieldAccess ForArgumentsCallee();
+
+ // Provides access to FixedArray slots.
+ static FieldAccess ForFixedArraySlot(size_t index);
+
+ // Provides access to Context slots.
static FieldAccess ForContextSlot(size_t index);
+ // Provides access to PropertyCell::value() field.
+ static FieldAccess ForPropertyCellValue();
+ static FieldAccess ForPropertyCellValue(Type* type);
+
+ // Provides access to SharedFunctionInfo::feedback_vector() field.
+ static FieldAccess ForSharedFunctionInfoTypeFeedbackVector();
+
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
+ // Provides access to FixedDoubleArray elements.
+ static ElementAccess ForFixedDoubleArrayElement();
+
// Provides access to Fixed{type}TypedArray and External{type}Array elements.
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
bool is_external);
+ // ===========================================================================
+ // Access to global per-isolate variables (based on external reference).
+
+ // Provides access to the backing store of a StatsCounter.
+ static FieldAccess ForStatsCounter();
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
diff --git a/src/compiler/access-info.cc b/src/compiler/access-info.cc
new file mode 100644
index 0000000..612170e
--- /dev/null
+++ b/src/compiler/access-info.cc
@@ -0,0 +1,488 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <ostream>
+
+#include "src/accessors.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-info.h"
+#include "src/field-index-inl.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/type-cache.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+bool CanInlineElementAccess(Handle<Map> map) {
+ if (!map->IsJSObjectMap()) return false;
+ if (map->is_access_check_needed()) return false;
+ if (map->has_indexed_interceptor()) return false;
+ ElementsKind const elements_kind = map->elements_kind();
+ if (IsFastElementsKind(elements_kind)) return true;
+ // TODO(bmeurer): Add support for other elements kinds.
+ return false;
+}
+
+
+bool CanInlinePropertyAccess(Handle<Map> map) {
+ // We can inline property access to prototypes of all primitives, except
+ // the special Oddball ones that have no wrapper counterparts (i.e. Null,
+ // Undefined and TheHole).
+ STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_TYPE);
+ if (map->IsBooleanMap()) return true;
+ if (map->instance_type() < LAST_PRIMITIVE_TYPE) return true;
+ return map->IsJSObjectMap() && !map->is_dictionary_map() &&
+ !map->has_named_interceptor() &&
+ // TODO(verwaest): Whitelist contexts to which we have access.
+ !map->is_access_check_needed();
+}
+
+} // namespace
+
+
+std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
+ switch (access_mode) {
+ case AccessMode::kLoad:
+ return os << "Load";
+ case AccessMode::kStore:
+ return os << "Store";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+// static
+PropertyAccessInfo PropertyAccessInfo::NotFound(Type* receiver_type,
+ MaybeHandle<JSObject> holder) {
+ return PropertyAccessInfo(holder, receiver_type);
+}
+
+
+// static
+PropertyAccessInfo PropertyAccessInfo::DataConstant(
+ Type* receiver_type, Handle<Object> constant,
+ MaybeHandle<JSObject> holder) {
+ return PropertyAccessInfo(holder, constant, receiver_type);
+}
+
+
+// static
+PropertyAccessInfo PropertyAccessInfo::DataField(
+ Type* receiver_type, FieldIndex field_index, Type* field_type,
+ FieldCheck field_check, MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map) {
+ return PropertyAccessInfo(holder, transition_map, field_index, field_check,
+ field_type, receiver_type);
+}
+
+
+ElementAccessInfo::ElementAccessInfo() : receiver_type_(Type::None()) {}
+
+
+ElementAccessInfo::ElementAccessInfo(Type* receiver_type,
+ ElementsKind elements_kind,
+ MaybeHandle<JSObject> holder)
+ : elements_kind_(elements_kind),
+ holder_(holder),
+ receiver_type_(receiver_type) {}
+
+
+PropertyAccessInfo::PropertyAccessInfo()
+ : kind_(kInvalid), receiver_type_(Type::None()), field_type_(Type::Any()) {}
+
+
+PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ Type* receiver_type)
+ : kind_(kNotFound),
+ receiver_type_(receiver_type),
+ holder_(holder),
+ field_type_(Type::Any()) {}
+
+
+PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ Handle<Object> constant,
+ Type* receiver_type)
+ : kind_(kDataConstant),
+ receiver_type_(receiver_type),
+ constant_(constant),
+ holder_(holder),
+ field_type_(Type::Any()) {}
+
+
+PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map,
+ FieldIndex field_index,
+ FieldCheck field_check, Type* field_type,
+ Type* receiver_type)
+ : kind_(kDataField),
+ receiver_type_(receiver_type),
+ transition_map_(transition_map),
+ holder_(holder),
+ field_index_(field_index),
+ field_check_(field_check),
+ field_type_(field_type) {}
+
+
+AccessInfoFactory::AccessInfoFactory(CompilationDependencies* dependencies,
+ Handle<Context> native_context, Zone* zone)
+ : dependencies_(dependencies),
+ native_context_(native_context),
+ isolate_(native_context->GetIsolate()),
+ type_cache_(TypeCache::Get()),
+ zone_(zone) {
+ DCHECK(native_context->IsNativeContext());
+}
+
+
+bool AccessInfoFactory::ComputeElementAccessInfo(
+ Handle<Map> map, AccessMode access_mode, ElementAccessInfo* access_info) {
+ // Check if it is safe to inline element access for the {map}.
+ if (!CanInlineElementAccess(map)) return false;
+
+ ElementsKind const elements_kind = map->elements_kind();
+
+ // Certain (monomorphic) stores need a prototype chain check because shape
+ // changes could allow callbacks on elements in the chain that are not
+ // compatible with monomorphic keyed stores.
+ MaybeHandle<JSObject> holder;
+ if (access_mode == AccessMode::kStore && map->prototype()->IsJSObject()) {
+ for (PrototypeIterator i(map); !i.IsAtEnd(); i.Advance()) {
+ Handle<JSReceiver> prototype =
+ PrototypeIterator::GetCurrent<JSReceiver>(i);
+ if (!prototype->IsJSObject()) return false;
+ // TODO(bmeurer): We do not currently support unstable prototypes.
+ // We might want to revisit the way we handle certain keyed stores
+ // because this whole prototype chain check is essentially a hack,
+ // and I'm not sure that it is correct at all with dictionaries in
+ // the prototype chain.
+ if (!prototype->map()->is_stable()) return false;
+ holder = Handle<JSObject>::cast(prototype);
+ }
+ }
+
+ *access_info =
+ ElementAccessInfo(Type::Class(map, zone()), elements_kind, holder);
+ return true;
+}
+
+
+bool AccessInfoFactory::ComputeElementAccessInfos(
+ MapHandleList const& maps, AccessMode access_mode,
+ ZoneVector<ElementAccessInfo>* access_infos) {
+ // Collect possible transition targets.
+ MapHandleList possible_transition_targets(maps.length());
+ for (Handle<Map> map : maps) {
+ if (Map::TryUpdate(map).ToHandle(&map)) {
+ if (CanInlineElementAccess(map) &&
+ IsFastElementsKind(map->elements_kind()) &&
+ GetInitialFastElementsKind() != map->elements_kind()) {
+ possible_transition_targets.Add(map);
+ }
+ }
+ }
+
+ // Separate the actual receiver maps and the possible transition sources.
+ MapHandleList receiver_maps(maps.length());
+ MapTransitionList transitions(maps.length());
+ for (Handle<Map> map : maps) {
+ if (Map::TryUpdate(map).ToHandle(&map)) {
+ Handle<Map> transition_target =
+ Map::FindTransitionedMap(map, &possible_transition_targets);
+ if (transition_target.is_null()) {
+ receiver_maps.Add(map);
+ } else {
+ transitions.push_back(std::make_pair(map, transition_target));
+ }
+ }
+ }
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ // Compute the element access information.
+ ElementAccessInfo access_info;
+ if (!ComputeElementAccessInfo(receiver_map, access_mode, &access_info)) {
+ return false;
+ }
+
+ // Collect the possible transitions for the {receiver_map}.
+ for (auto transition : transitions) {
+ if (transition.second.is_identical_to(receiver_map)) {
+ access_info.transitions().push_back(transition);
+ }
+ }
+
+ // Schedule the access information.
+ access_infos->push_back(access_info);
+ }
+ return true;
+}
+
+
+bool AccessInfoFactory::ComputePropertyAccessInfo(
+ Handle<Map> map, Handle<Name> name, AccessMode access_mode,
+ PropertyAccessInfo* access_info) {
+ // Check if it is safe to inline property access for the {map}.
+ if (!CanInlinePropertyAccess(map)) return false;
+
+ // Compute the receiver type.
+ Handle<Map> receiver_map = map;
+
+ // We support fast inline cases for certain JSObject getters.
+ if (access_mode == AccessMode::kLoad &&
+ LookupSpecialFieldAccessor(map, name, access_info)) {
+ return true;
+ }
+
+ MaybeHandle<JSObject> holder;
+ do {
+ // Lookup the named property on the {map}.
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ int const number = descriptors->SearchWithCache(*name, *map);
+ if (number != DescriptorArray::kNotFound) {
+ PropertyDetails const details = descriptors->GetDetails(number);
+ if (access_mode == AccessMode::kStore) {
+ // Don't bother optimizing stores to read-only properties.
+ if (details.IsReadOnly()) {
+ return false;
+ }
+ // Check for store to data property on a prototype.
+ if (details.kind() == kData && !holder.is_null()) {
+ // We found a data property on a prototype, not on the receiver
+ // itself, so the store needs to transition to a new data property.
+ // Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
+ return LookupTransition(receiver_map, name, holder, access_info);
+ }
+ }
+ if (details.type() == DATA_CONSTANT) {
+ *access_info = PropertyAccessInfo::DataConstant(
+ Type::Class(receiver_map, zone()),
+ handle(descriptors->GetValue(number), isolate()), holder);
+ return true;
+ } else if (details.type() == DATA) {
+ int index = descriptors->GetFieldIndex(number);
+ Representation field_representation = details.representation();
+ FieldIndex field_index = FieldIndex::ForPropertyIndex(
+ *map, index, field_representation.IsDouble());
+ Type* field_type = Type::Tagged();
+ if (field_representation.IsSmi()) {
+ field_type = type_cache_.kSmi;
+ } else if (field_representation.IsDouble()) {
+ field_type = type_cache_.kFloat64;
+ } else if (field_representation.IsHeapObject()) {
+ // Extract the field type from the property details (make sure its
+ // representation is TaggedPointer to reflect the heap object case).
+ field_type = Type::Intersect(
+ Type::Convert<HeapType>(
+ handle(descriptors->GetFieldType(number), isolate()), zone()),
+ Type::TaggedPointer(), zone());
+ if (field_type->Is(Type::None())) {
+ // Store is not safe if the field type was cleared.
+ if (access_mode == AccessMode::kStore) return false;
+
+ // The field type was cleared by the GC, so we don't know anything
+ // about the contents now.
+ // TODO(bmeurer): It would be awesome to make this saner in the
+ // runtime/GC interaction.
+ field_type = Type::TaggedPointer();
+ } else if (!Type::Any()->Is(field_type)) {
+ // Add proper code dependencies in case of stable field map(s).
+ Handle<Map> field_owner_map(map->FindFieldOwner(number), isolate());
+ dependencies()->AssumeFieldType(field_owner_map);
+ }
+ DCHECK(field_type->Is(Type::TaggedPointer()));
+ }
+ *access_info = PropertyAccessInfo::DataField(
+ Type::Class(receiver_map, zone()), field_index, field_type,
+ FieldCheck::kNone, holder);
+ return true;
+ } else {
+ // TODO(bmeurer): Add support for accessors.
+ return false;
+ }
+ }
+
+ // Don't search on the prototype chain for special indices in case of
+ // integer indexed exotic objects (see ES6 section 9.4.5).
+ if (map->IsJSTypedArrayMap() && name->IsString() &&
+ IsSpecialIndex(isolate()->unicode_cache(), String::cast(*name))) {
+ return false;
+ }
+
+ // Don't lookup private symbols on the prototype chain.
+ if (name->IsPrivate()) return false;
+
+ // Walk up the prototype chain.
+ if (!map->prototype()->IsJSObject()) {
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ Handle<JSFunction> constructor;
+ if (Map::GetConstructorFunction(map, native_context())
+ .ToHandle(&constructor)) {
+ map = handle(constructor->initial_map(), isolate());
+ DCHECK(map->prototype()->IsJSObject());
+ } else if (map->prototype()->IsNull()) {
+ // The property was not found on the receiver or any prototype, so a
+ // store needs to transition to a new data property.
+ // Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
+ if (access_mode == AccessMode::kStore) {
+ return LookupTransition(receiver_map, name, holder, access_info);
+ }
+ // The property was not found; return undefined or throw, depending
+ // on the language mode of the load operation.
+ // Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
+ *access_info = PropertyAccessInfo::NotFound(
+ Type::Class(receiver_map, zone()), holder);
+ return true;
+ } else {
+ return false;
+ }
+ }
+ Handle<JSObject> map_prototype(JSObject::cast(map->prototype()), isolate());
+ if (map_prototype->map()->is_deprecated()) {
+ // Try to migrate the prototype object so we don't embed the deprecated
+ // map into the optimized code.
+ JSObject::TryMigrateInstance(map_prototype);
+ }
+ map = handle(map_prototype->map(), isolate());
+ holder = map_prototype;
+ } while (CanInlinePropertyAccess(map));
+ return false;
+}
+
+
+bool AccessInfoFactory::ComputePropertyAccessInfos(
+ MapHandleList const& maps, Handle<Name> name, AccessMode access_mode,
+ ZoneVector<PropertyAccessInfo>* access_infos) {
+ for (Handle<Map> map : maps) {
+ if (Map::TryUpdate(map).ToHandle(&map)) {
+ PropertyAccessInfo access_info;
+ if (!ComputePropertyAccessInfo(map, name, access_mode, &access_info)) {
+ return false;
+ }
+ access_infos->push_back(access_info);
+ }
+ }
+ return true;
+}
+
+
+bool AccessInfoFactory::LookupSpecialFieldAccessor(
+ Handle<Map> map, Handle<Name> name, PropertyAccessInfo* access_info) {
+ // Check for special JSObject field accessors.
+ int offset;
+ if (Accessors::IsJSObjectFieldAccessor(map, name, &offset)) {
+ FieldIndex field_index = FieldIndex::ForInObjectOffset(offset);
+ Type* field_type = Type::Tagged();
+ if (map->IsStringMap()) {
+ DCHECK(Name::Equals(factory()->length_string(), name));
+ // The String::length property is always a smi in the range
+ // [0, String::kMaxLength].
+ field_type = type_cache_.kStringLengthType;
+ } else if (map->IsJSArrayMap()) {
+ DCHECK(Name::Equals(factory()->length_string(), name));
+ // The JSArray::length property is a smi in the range
+ // [0, FixedDoubleArray::kMaxLength] in case of fast double
+ // elements, a smi in the range [0, FixedArray::kMaxLength]
+ // in case of other fast elements, and [0, kMaxUInt32] in
+ // case of other arrays.
+ if (IsFastDoubleElementsKind(map->elements_kind())) {
+ field_type = type_cache_.kFixedDoubleArrayLengthType;
+ } else if (IsFastElementsKind(map->elements_kind())) {
+ field_type = type_cache_.kFixedArrayLengthType;
+ } else {
+ field_type = type_cache_.kJSArrayLengthType;
+ }
+ }
+ *access_info = PropertyAccessInfo::DataField(Type::Class(map, zone()),
+ field_index, field_type);
+ return true;
+ }
+ // Check for special JSArrayBufferView field accessors.
+ if (Accessors::IsJSArrayBufferViewFieldAccessor(map, name, &offset)) {
+ FieldIndex field_index = FieldIndex::ForInObjectOffset(offset);
+ Type* field_type = Type::Tagged();
+ if (Name::Equals(factory()->byte_length_string(), name) ||
+ Name::Equals(factory()->byte_offset_string(), name)) {
+ // The JSArrayBufferView::byte_length and JSArrayBufferView::byte_offset
+ // properties are always numbers in the range [0, kMaxSafeInteger].
+ field_type = type_cache_.kPositiveSafeInteger;
+ } else if (map->IsJSTypedArrayMap()) {
+ DCHECK(Name::Equals(factory()->length_string(), name));
+ // The JSTypedArray::length property is always a number in the range
+ // [0, kMaxSafeInteger].
+ field_type = type_cache_.kPositiveSafeInteger;
+ }
+ *access_info = PropertyAccessInfo::DataField(
+ Type::Class(map, zone()), field_index, field_type,
+ FieldCheck::kJSArrayBufferViewBufferNotNeutered);
+ return true;
+ }
+ return false;
+}
+
+
+bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
+ MaybeHandle<JSObject> holder,
+ PropertyAccessInfo* access_info) {
+ // Check if the {map} has a data transition with the given {name}.
+ if (map->unused_property_fields() == 0) return false;
+ Handle<Map> transition_map;
+ if (TransitionArray::SearchTransition(map, kData, name, NONE)
+ .ToHandle(&transition_map)) {
+ int const number = transition_map->LastAdded();
+ PropertyDetails const details =
+ transition_map->instance_descriptors()->GetDetails(number);
+ // Don't bother optimizing stores to read-only properties.
+ if (details.IsReadOnly()) return false;
+ // TODO(bmeurer): Handle transition to data constant?
+ if (details.type() != DATA) return false;
+ int const index = details.field_index();
+ Representation field_representation = details.representation();
+ FieldIndex field_index = FieldIndex::ForPropertyIndex(
+ *transition_map, index, field_representation.IsDouble());
+ Type* field_type = Type::Tagged();
+ if (field_representation.IsSmi()) {
+ field_type = type_cache_.kSmi;
+ } else if (field_representation.IsDouble()) {
+ field_type = type_cache_.kFloat64;
+ } else if (field_representation.IsHeapObject()) {
+ // Extract the field type from the property details (make sure its
+ // representation is TaggedPointer to reflect the heap object case).
+ field_type = Type::Intersect(
+ Type::Convert<HeapType>(
+ handle(
+ transition_map->instance_descriptors()->GetFieldType(number),
+ isolate()),
+ zone()),
+ Type::TaggedPointer(), zone());
+ if (field_type->Is(Type::None())) {
+ // Store is not safe if the field type was cleared.
+ return false;
+ } else if (!Type::Any()->Is(field_type)) {
+ // Add proper code dependencies in case of stable field map(s).
+ Handle<Map> field_owner_map(transition_map->FindFieldOwner(number),
+ isolate());
+ dependencies()->AssumeFieldType(field_owner_map);
+ }
+ DCHECK(field_type->Is(Type::TaggedPointer()));
+ }
+ dependencies()->AssumeMapNotDeprecated(transition_map);
+ *access_info = PropertyAccessInfo::DataField(
+ Type::Class(map, zone()), field_index, field_type, FieldCheck::kNone,
+ holder, transition_map);
+ return true;
+ }
+ return false;
+}
+
+
+Factory* AccessInfoFactory::factory() const { return isolate()->factory(); }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/access-info.h b/src/compiler/access-info.h
new file mode 100644
index 0000000..cae1191
--- /dev/null
+++ b/src/compiler/access-info.h
@@ -0,0 +1,164 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ACCESS_INFO_H_
+#define V8_COMPILER_ACCESS_INFO_H_
+
+#include <iosfwd>
+
+#include "src/field-index.h"
+#include "src/objects.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class Factory;
+class TypeCache;
+
+
+namespace compiler {
+
+// Whether we are loading a property or storing to a property.
+enum class AccessMode { kLoad, kStore };
+
+std::ostream& operator<<(std::ostream&, AccessMode);
+
+
+// Mapping of transition source to transition target.
+typedef std::vector<std::pair<Handle<Map>, Handle<Map>>> MapTransitionList;
+
+
+// This class encapsulates all information required to access a certain element.
+class ElementAccessInfo final {
+ public:
+ ElementAccessInfo();
+ ElementAccessInfo(Type* receiver_type, ElementsKind elements_kind,
+ MaybeHandle<JSObject> holder);
+
+ MaybeHandle<JSObject> holder() const { return holder_; }
+ ElementsKind elements_kind() const { return elements_kind_; }
+ Type* receiver_type() const { return receiver_type_; }
+ MapTransitionList& transitions() { return transitions_; }
+ MapTransitionList const& transitions() const { return transitions_; }
+
+ private:
+ ElementsKind elements_kind_;
+ MaybeHandle<JSObject> holder_;
+ Type* receiver_type_;
+ MapTransitionList transitions_;
+};
+
+
+// Additional checks that need to be performed for data field accesses.
+enum class FieldCheck : uint8_t {
+ // No additional checking needed.
+ kNone,
+ // Check that the [[ViewedArrayBuffer]] of {JSArrayBufferView}s
+ // was not neutered.
+ kJSArrayBufferViewBufferNotNeutered,
+};
+
+
+// This class encapsulates all information required to access a certain
+// object property, either on the object itself or on the prototype chain.
+class PropertyAccessInfo final {
+ public:
+ enum Kind { kInvalid, kNotFound, kDataConstant, kDataField };
+
+ static PropertyAccessInfo NotFound(Type* receiver_type,
+ MaybeHandle<JSObject> holder);
+ static PropertyAccessInfo DataConstant(Type* receiver_type,
+ Handle<Object> constant,
+ MaybeHandle<JSObject> holder);
+ static PropertyAccessInfo DataField(
+ Type* receiver_type, FieldIndex field_index, Type* field_type,
+ FieldCheck field_check = FieldCheck::kNone,
+ MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
+ MaybeHandle<Map> transition_map = MaybeHandle<Map>());
+
+ PropertyAccessInfo();
+
+ bool IsNotFound() const { return kind() == kNotFound; }
+ bool IsDataConstant() const { return kind() == kDataConstant; }
+ bool IsDataField() const { return kind() == kDataField; }
+
+ bool HasTransitionMap() const { return !transition_map().is_null(); }
+
+ Kind kind() const { return kind_; }
+ MaybeHandle<JSObject> holder() const { return holder_; }
+ MaybeHandle<Map> transition_map() const { return transition_map_; }
+ Handle<Object> constant() const { return constant_; }
+ FieldCheck field_check() const { return field_check_; }
+ FieldIndex field_index() const { return field_index_; }
+ Type* field_type() const { return field_type_; }
+ Type* receiver_type() const { return receiver_type_; }
+
+ private:
+ PropertyAccessInfo(MaybeHandle<JSObject> holder, Type* receiver_type);
+ PropertyAccessInfo(MaybeHandle<JSObject> holder, Handle<Object> constant,
+ Type* receiver_type);
+ PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map, FieldIndex field_index,
+ FieldCheck field_check, Type* field_type,
+ Type* receiver_type);
+
+ Kind kind_;
+ Type* receiver_type_;
+ Handle<Object> constant_;
+ MaybeHandle<Map> transition_map_;
+ MaybeHandle<JSObject> holder_;
+ FieldIndex field_index_;
+ FieldCheck field_check_;
+ Type* field_type_;
+};
+
+
+// Factory class for {ElementAccessInfo}s and {PropertyAccessInfo}s.
+class AccessInfoFactory final {
+ public:
+ AccessInfoFactory(CompilationDependencies* dependencies,
+ Handle<Context> native_context, Zone* zone);
+
+ bool ComputeElementAccessInfo(Handle<Map> map, AccessMode access_mode,
+ ElementAccessInfo* access_info);
+ bool ComputeElementAccessInfos(MapHandleList const& maps,
+ AccessMode access_mode,
+ ZoneVector<ElementAccessInfo>* access_infos);
+ bool ComputePropertyAccessInfo(Handle<Map> map, Handle<Name> name,
+ AccessMode access_mode,
+ PropertyAccessInfo* access_info);
+ bool ComputePropertyAccessInfos(MapHandleList const& maps, Handle<Name> name,
+ AccessMode access_mode,
+ ZoneVector<PropertyAccessInfo>* access_infos);
+
+ private:
+ bool LookupSpecialFieldAccessor(Handle<Map> map, Handle<Name> name,
+ PropertyAccessInfo* access_info);
+ bool LookupTransition(Handle<Map> map, Handle<Name> name,
+ MaybeHandle<JSObject> holder,
+ PropertyAccessInfo* access_info);
+
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Factory* factory() const;
+ Isolate* isolate() const { return isolate_; }
+ Handle<Context> native_context() const { return native_context_; }
+ Zone* zone() const { return zone_; }
+
+ CompilationDependencies* const dependencies_;
+ Handle<Context> const native_context_;
+ Isolate* const isolate_;
+ TypeCache const& type_cache_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(AccessInfoFactory);
+};
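+
+// A sketch of the intended usage during JS-level lowering (the surrounding
+// reducer plumbing here is hypothetical; only the factory and accessor calls
+// are taken from the declarations above):
+//
+//   AccessInfoFactory factory(dependencies, native_context, zone);
+//   PropertyAccessInfo access_info;
+//   if (factory.ComputePropertyAccessInfo(map, name, AccessMode::kLoad,
+//                                         &access_info) &&
+//       access_info.IsDataField()) {
+//     // Emit a direct field load via access_info.field_index().
+//   }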
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ACCESS_INFO_H_
diff --git a/src/compiler/all-nodes.cc b/src/compiler/all-nodes.cc
new file mode 100644
index 0000000..ed4a218
--- /dev/null
+++ b/src/compiler/all-nodes.cc
@@ -0,0 +1,39 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/all-nodes.h"
+
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+AllNodes::AllNodes(Zone* local_zone, const Graph* graph)
+ : live(local_zone), is_live(graph->NodeCount(), false, local_zone) {
+ Node* end = graph->end();
+ is_live[end->id()] = true;
+ live.push_back(end);
+ // Find all live nodes reachable from end.
+ for (size_t i = 0; i < live.size(); i++) {
+ for (Node* const input : live[i]->inputs()) {
+ if (input == nullptr) {
+ // TODO(titzer): print a warning.
+ continue;
+ }
+ if (input->id() >= graph->NodeCount()) {
+ // TODO(titzer): print a warning.
+ continue;
+ }
+ if (!is_live[input->id()]) {
+ is_live[input->id()] = true;
+ live.push_back(input);
+ }
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/all-nodes.h b/src/compiler/all-nodes.h
new file mode 100644
index 0000000..700f007
--- /dev/null
+++ b/src/compiler/all-nodes.h
@@ -0,0 +1,38 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ALL_NODES_H_
+#define V8_COMPILER_ALL_NODES_H_
+
+#include "src/compiler/node.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A helper utility that traverses the graph and gathers all nodes reachable
+// from end.
+class AllNodes {
+ public:
+ // Constructor. Traverses the graph and builds the {live} sets.
+ AllNodes(Zone* local_zone, const Graph* graph);
+
+ bool IsLive(Node* node) {
+ if (!node) return false;
+ size_t id = node->id();
+ return id < is_live.size() && is_live[id];
+ }
+
+ NodeVector live; // Nodes reachable from end.
+
+ private:
+ BoolVector is_live;
+};
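+
+// Usage sketch (names as declared above):
+//
+//   AllNodes all(local_zone, graph);
+//   for (Node* node : all.live) {
+//     // Visit every node reachable from graph->end().
+//   }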
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ALL_NODES_H_
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index cfa4de9..9b074b0 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -5,11 +5,11 @@
#include "src/compiler/code-generator.h"
#include "src/arm/macro-assembler-arm.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/scopes.h"
+#include "src/compiler/osr.h"
namespace v8 {
namespace internal {
@@ -22,16 +22,16 @@
// Adds Arm-specific methods to convert InstructionOperands.
-class ArmOperandConverter FINAL : public InstructionOperandConverter {
+class ArmOperandConverter final : public InstructionOperandConverter {
public:
ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- SwVfpRegister OutputFloat32Register(int index = 0) {
+ SwVfpRegister OutputFloat32Register(size_t index = 0) {
return ToFloat32Register(instr_->OutputAt(index));
}
- SwVfpRegister InputFloat32Register(int index) {
+ SwVfpRegister InputFloat32Register(size_t index) {
return ToFloat32Register(instr_->InputAt(index));
}
@@ -39,11 +39,11 @@
return ToFloat64Register(op).low();
}
- LowDwVfpRegister OutputFloat64Register(int index = 0) {
+ LowDwVfpRegister OutputFloat64Register(size_t index = 0) {
return ToFloat64Register(instr_->OutputAt(index));
}
- LowDwVfpRegister InputFloat64Register(int index) {
+ LowDwVfpRegister InputFloat64Register(size_t index) {
return ToFloat64Register(instr_->InputAt(index));
}
@@ -63,7 +63,7 @@
return LeaveCC;
}
- Operand InputImmediate(int index) {
+ Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
case Constant::kInt32:
@@ -84,8 +84,8 @@
return Operand::Zero();
}
- Operand InputOperand2(int first_index) {
- const int index = first_index;
+ Operand InputOperand2(size_t first_index) {
+ const size_t index = first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
case kMode_Offset_RI:
@@ -116,8 +116,8 @@
return Operand::Zero();
}
- MemOperand InputOffset(int* first_index) {
- const int index = *first_index;
+ MemOperand InputOffset(size_t* first_index) {
+ const size_t index = *first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
case kMode_Operand2_I:
@@ -142,17 +142,15 @@
return MemOperand(r0);
}
- MemOperand InputOffset(int first_index = 0) {
+ MemOperand InputOffset(size_t first_index = 0) {
return InputOffset(&first_index);
}
MemOperand ToMemOperand(InstructionOperand* op) const {
- DCHECK(op != NULL);
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -160,12 +158,12 @@
namespace {
-class OutOfLineLoadFloat32 FINAL : public OutOfLineCode {
+class OutOfLineLoadFloat32 final : public OutOfLineCode {
public:
OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
__ vmov(result_, std::numeric_limits<float>::quiet_NaN());
}
@@ -174,12 +172,12 @@
};
-class OutOfLineLoadFloat64 FINAL : public OutOfLineCode {
+class OutOfLineLoadFloat64 final : public OutOfLineCode {
public:
OutOfLineLoadFloat64(CodeGenerator* gen, DwVfpRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
__ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
}
@@ -188,17 +186,109 @@
};
-class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+class OutOfLineLoadInteger final : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL { __ mov(result_, Operand::Zero()); }
+ void Generate() final { __ mov(result_, Operand::Zero()); }
private:
Register const result_;
};
+
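+// Slow path for stores with a write barrier: skips the barrier for smi
+// values and for values on pages that are not of interest, and otherwise
+// records the slot via RecordWriteStub.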
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+ // and restore lr properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ add(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
+
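+// Maps the architecture-independent FlagsCondition to the matching ARM
+// condition code.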
+Condition FlagsConditionToCondition(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kFloatLessThanOrUnordered:
+ return lt;
+ case kFloatGreaterThanOrEqual:
+ return ge;
+ case kFloatLessThanOrEqual:
+ return ls;
+ case kFloatGreaterThanOrUnordered:
+ return hi;
+ case kFloatLessThan:
+ return lo;
+ case kFloatGreaterThanOrEqualOrUnordered:
+ return hs;
+ case kFloatLessThanOrEqualOrUnordered:
+ return le;
+ case kFloatGreaterThan:
+ return gt;
+ case kOverflow:
+ return vs;
+ case kNotOverflow:
+ return vc;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
} // namespace
@@ -264,10 +354,38 @@
} while (0)
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ add(sp, sp, Operand(sp_slot_delta * kPointerSize));
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ sub(sp, sp, Operand(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ if (FLAG_enable_embedded_constant_pool) {
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ }
+ __ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
+
+
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ArmOperandConverter i(this, instr);
+ masm()->MaybeCheckConstPool();
+
switch (ArchOpcodeField::decode(instr->opcode())) {
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
@@ -279,8 +397,24 @@
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallCodeObject: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ } else {
+ __ add(ip, i.InputRegister(0),
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+ }
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -294,18 +428,80 @@
}
__ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(ip);
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ cmp(cp, kScratchReg);
+ __ Assert(eq, kWrongFunctionContext);
+ }
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Jump(ip);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ break;
+ }
case kArchRet:
AssembleReturn();
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -314,10 +510,31 @@
__ mov(i.OutputRegister(), sp);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ str(value, MemOperand(object, index));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
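The deferred write barrier above keeps the common case cheap: the store itself is a plain str, and CheckPageFlag branches to the out-of-line RecordWrite code only when the object's page is flagged as interesting to the GC. A minimal C++ sketch of the equivalent control flow (PageFlagsOf and RecordWriteSlow are hypothetical stand-ins for the page-header read and the OutOfLineRecordWrite body):

    void StoreWithWriteBarrier(Address object, int offset, Address value) {
      *reinterpret_cast<Address*>(object + offset) = value;          // __ str
      if (PageFlagsOf(object) & kPointersFromHereAreInterestingMask) {
        RecordWriteSlow(object, offset, value);                      // ool->entry()
      }
      // ool->exit(): the fast path falls through here.
    }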
case kArmAdd:
__ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
i.OutputSBit());
@@ -441,6 +658,10 @@
i.InputInt32(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArmClz:
+ __ clz(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArmCmp:
__ cmp(i.InputRegister(0), i.InputOperand2(1));
DCHECK_EQ(SetCC, i.OutputSBit());
@@ -457,9 +678,67 @@
__ teq(i.InputRegister(0), i.InputOperand2(1));
DCHECK_EQ(SetCC, i.OutputSBit());
break;
+ case kArmVcmpF32:
+ if (instr->InputAt(1)->IsDoubleRegister()) {
+ __ VFPCompareAndSetFlags(i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ } else {
+ DCHECK(instr->InputAt(1)->IsImmediate());
+ // 0.0 is the only immediate supported by vcmp instructions.
+ DCHECK(i.InputFloat32(1) == 0.0f);
+ __ VFPCompareAndSetFlags(i.InputFloat32Register(0), i.InputFloat32(1));
+ }
+ DCHECK_EQ(SetCC, i.OutputSBit());
+ break;
+ case kArmVaddF32:
+ __ vadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVsubF32:
+ __ vsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmulF32:
+ __ vmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmlaF32:
+ __ vmla(i.OutputFloat32Register(), i.InputFloat32Register(1),
+ i.InputFloat32Register(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmlsF32:
+ __ vmls(i.OutputFloat32Register(), i.InputFloat32Register(1),
+ i.InputFloat32Register(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVdivF32:
+ __ vdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVsqrtF32:
+ __ vsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArmVabsF32:
+ __ vabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArmVnegF32:
+ __ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArmVcmpF64:
- __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ if (instr->InputAt(1)->IsDoubleRegister()) {
+ __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
+ } else {
+ DCHECK(instr->InputAt(1)->IsImmediate());
+ // 0.0 is the only immediate supported by vcmp instructions.
+ DCHECK(i.InputDouble(1) == 0.0);
+ __ VFPCompareAndSetFlags(i.InputFloat64Register(0), i.InputDouble(1));
+ }
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVaddF64:
@@ -509,21 +788,39 @@
case kArmVsqrtF64:
__ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
- case kArmVfloorF64:
- __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
- break;
- case kArmVceilF64:
- __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
- break;
- case kArmVroundTruncateF64:
- __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
- break;
- case kArmVroundTiesAwayF64:
- __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ case kArmVabsF64:
+ __ vabs(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
case kArmVnegF64:
__ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
+ case kArmVrintmF32:
+ __ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArmVrintmF64:
+ __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
+ case kArmVrintpF32:
+ __ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArmVrintpF64:
+ __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
+ case kArmVrintzF32:
+ __ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArmVrintzF64:
+ __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
+ case kArmVrintaF64:
+ __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
+ case kArmVrintnF32:
+ __ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArmVrintnF64:
+ __ vrintn(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
case kArmVcvtF32F64: {
__ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -562,6 +859,27 @@
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmVmovLowU32F64:
+ __ VmovLow(i.OutputRegister(), i.InputFloat64Register(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmovLowF64U32:
+ __ VmovLow(i.OutputFloat64Register(), i.InputRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmovHighU32F64:
+ __ VmovHigh(i.OutputRegister(), i.InputFloat64Register(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmovHighF64U32:
+ __ VmovHigh(i.OutputFloat64Register(), i.InputRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmovF64U32U32:
+ __ vmov(i.OutputFloat64Register(), i.InputRegister(0),
+ i.InputRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -571,7 +889,7 @@
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmStrb: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.InputOffset(&index);
__ strb(i.InputRegister(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -584,7 +902,7 @@
__ ldrsh(i.OutputRegister(), i.InputOffset());
break;
case kArmStrh: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.InputOffset(&index);
__ strh(i.InputRegister(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -594,7 +912,7 @@
__ ldr(i.OutputRegister(), i.InputOffset());
break;
case kArmStr: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.InputOffset(&index);
__ str(i.InputRegister(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -606,7 +924,7 @@
break;
}
case kArmVstrF32: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.InputOffset(&index);
__ vstr(i.InputFloat32Register(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -617,26 +935,25 @@
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVstrF64: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.InputOffset(&index);
__ vstr(i.InputFloat64Register(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmPush:
- __ Push(i.InputRegister(0));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ vpush(i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ __ push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArmStoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ add(index, object, index);
- __ str(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
- __ RecordWrite(object, index, value, lr_status, mode);
+ case kArmPoke: {
+ int const slot = MiscField::decode(instr->opcode());
+ __ str(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -676,8 +993,12 @@
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
+ case kCheckedLoadWord64:
+ case kCheckedStoreWord64:
+ UNREACHABLE(); // currently unsupported checked int64 load/store.
+ break;
}
-}
+} // NOLINT(readability/fn_size)
// Assembles branches after an instruction.
@@ -685,70 +1006,13 @@
ArmOperandConverter i(this, instr);
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
- switch (branch->condition) {
- case kUnorderedEqual:
- // The "eq" condition will not catch the unordered case.
- // The jump/fall through to false label will be used if the comparison
- // was unordered.
- case kEqual:
- __ b(eq, tlabel);
- break;
- case kUnorderedNotEqual:
- // Unordered or not equal can be tested with "ne" condition.
- // See ARMv7 manual A8.3 - Conditional execution.
- case kNotEqual:
- __ b(ne, tlabel);
- break;
- case kSignedLessThan:
- __ b(lt, tlabel);
- break;
- case kSignedGreaterThanOrEqual:
- __ b(ge, tlabel);
- break;
- case kSignedLessThanOrEqual:
- __ b(le, tlabel);
- break;
- case kSignedGreaterThan:
- __ b(gt, tlabel);
- break;
- case kUnorderedLessThan:
- // The "lo" condition will not catch the unordered case.
- // The jump/fall through to false label will be used if the comparison
- // was unordered.
- case kUnsignedLessThan:
- __ b(lo, tlabel);
- break;
- case kUnorderedGreaterThanOrEqual:
- // Unordered, greater than or equal can be tested with "hs" condition.
- // See ARMv7 manual A8.3 - Conditional execution.
- case kUnsignedGreaterThanOrEqual:
- __ b(hs, tlabel);
- break;
- case kUnorderedLessThanOrEqual:
- // The "ls" condition will not catch the unordered case.
- // The jump/fall through to false label will be used if the comparison
- // was unordered.
- case kUnsignedLessThanOrEqual:
- __ b(ls, tlabel);
- break;
- case kUnorderedGreaterThan:
- // Unordered or greater than can be tested with "hi" condition.
- // See ARMv7 manual A8.3 - Conditional execution.
- case kUnsignedGreaterThan:
- __ b(hi, tlabel);
- break;
- case kOverflow:
- __ b(vs, tlabel);
- break;
- case kNotOverflow:
- __ b(vc, tlabel);
- break;
- }
+ Condition cc = FlagsConditionToCondition(branch->condition);
+ __ b(cc, tlabel);
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
@@ -757,169 +1021,160 @@
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
ArmOperandConverter i(this, instr);
- Label done;
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
- Label check;
- DCHECK_NE(0, instr->OutputCount());
+ DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
- Condition cc = kNoCondition;
- switch (condition) {
- case kUnorderedEqual:
- __ b(vc, &check);
- __ mov(reg, Operand(0));
- __ b(&done);
- // Fall through.
- case kEqual:
- cc = eq;
- break;
- case kUnorderedNotEqual:
- __ b(vc, &check);
- __ mov(reg, Operand(1));
- __ b(&done);
- // Fall through.
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnorderedLessThan:
- __ b(vc, &check);
- __ mov(reg, Operand(0));
- __ b(&done);
- // Fall through.
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnorderedGreaterThanOrEqual:
- __ b(vc, &check);
- __ mov(reg, Operand(1));
- __ b(&done);
- // Fall through.
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnorderedLessThanOrEqual:
- __ b(vc, &check);
- __ mov(reg, Operand(0));
- __ b(&done);
- // Fall through.
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnorderedGreaterThan:
- __ b(vc, &check);
- __ mov(reg, Operand(1));
- __ b(&done);
- // Fall through.
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- case kOverflow:
- cc = vs;
- break;
- case kNotOverflow:
- cc = vc;
- break;
- }
- __ bind(&check);
+ Condition cc = FlagsConditionToCondition(condition);
__ mov(reg, Operand(0));
__ mov(reg, Operand(1), LeaveCC, cc);
- __ bind(&done);
}
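With the unordered cases folded into FlagsConditionToCondition, materializing a boolean needs no branches: the result register is set to 0 unconditionally and then conditionally overwritten with 1 using ARM's predicated mov. As comments:

    //   mov reg, #0
    //   mov reg, #1, cc   ; executed only when the condition holds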
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ ArmOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ cmp(input, Operand(i.InputInt32(index + 0)));
+ __ b(eq, GetLabel(i.InputRpo(index + 1)));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ ArmOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+ // Ensure that the constant pool is emitted first, if necessary.
+ __ CheckConstPool(true, true);
+ __ cmp(input, Operand(case_count));
+ __ BlockConstPoolFor(case_count + 2);
+ __ add(pc, pc, Operand(input, LSL, 2), LeaveCC, lo);
+ __ b(GetLabel(i.InputRpo(1)));
+ for (size_t index = 0; index < case_count; ++index) {
+ __ b(GetLabel(i.InputRpo(index + 2)));
+ }
+}
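AssembleArchTableSwitch relies on ARM's pipelined pc semantics: reading pc yields the address of the current instruction plus 8, i.e. two instructions ahead, which here is the first entry of the branch table that follows the default branch. Blocking the constant pool for case_count + 2 instructions guarantees that layout stays intact. A sketch of the emitted sequence, as comments (labels illustrative):

    //   cmp  input, #case_count
    //   add  pc, pc, input, LSL #2   ; if lo: pc reads as &table, so this
    //                                ; jumps straight to table[input]
    //   b    default                 ; reached when input >= case_count
    // table:
    //   b    case_0
    //   b    case_1
    //   ...                          ; case_count branches in total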
+
+
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- bool saved_pp;
- if (FLAG_enable_ool_constant_pool) {
+ if (descriptor->IsCFunctionCall()) {
+ if (FLAG_enable_embedded_constant_pool) {
__ Push(lr, fp, pp);
// Adjust FP to point to saved FP.
__ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
- saved_pp = true;
} else {
__ Push(lr, fp);
__ mov(fp, sp);
- saved_pp = false;
- }
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0 || saved_pp) {
- // Save callee-saved registers.
- int register_save_area_size = saved_pp ? kPointerSize : 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- register_save_area_size += kPointerSize;
- }
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
- __ stm(db_w, sp, saves);
}
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(0);
}
- int stack_slots = frame()->GetSpillSlotCount();
- if (stack_slots > 0) {
- __ sub(sp, sp, Operand(stack_slots * kPointerSize));
+ frame_access_state()->SetFrameAccessToDefault();
+
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+ }
+ if (stack_shrink_slots > 0) {
+ __ sub(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ }
+
+ if (saves_fp != 0) {
+ // Save callee-saved FP registers.
+ STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+ uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
+ uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
+ DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
+ __ vstm(db_w, sp, DwVfpRegister::from_code(first),
+ DwVfpRegister::from_code(last));
+ frame()->AllocateSavedCalleeRegisterSlots((last - first + 1) *
+ (kDoubleSize / kPointerSize));
+ }
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ stm(db_w, sp, saves);
+ frame()->AllocateSavedCalleeRegisterSlots(
+ base::bits::CountPopulation32(saves));
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
- if (stack_slots > 0) {
- __ add(sp, sp, Operand(stack_slots * kPointerSize));
- }
- // Restore registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) {
- __ ldm(ia_w, sp, saves);
- }
- }
- __ LeaveFrame(StackFrame::MANUAL);
- __ Ret();
- } else {
- __ LeaveFrame(StackFrame::MANUAL);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ Drop(pop_count);
- __ Ret();
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+
+ // Restore registers.
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ ldm(ia_w, sp, saves);
}
+
+ // Restore FP registers.
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+ uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
+ uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
+ __ vldm(ia_w, sp, DwVfpRegister::from_code(first),
+ DwVfpRegister::from_code(last));
+ }
+
+ if (descriptor->IsCFunctionCall()) {
+ __ LeaveFrame(StackFrame::MANUAL);
+ } else if (frame()->needs_frame()) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ __ LeaveFrame(StackFrame::MANUAL);
+ }
+ }
+ __ Ret(pop_count);
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- ArmOperandConverter g(this, NULL);
+ ArmOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -963,9 +1218,19 @@
case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference()));
break;
- case Constant::kHeapObject:
- __ Move(dst, src.ToHeapObject());
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ int offset;
+ if (IsMaterializableFromFrame(src_object, &offset)) {
+ __ ldr(dst, MemOperand(fp, offset));
+ } else if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ Move(dst, src_object);
+ }
break;
+ }
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on arm.
break;
@@ -1017,7 +1282,7 @@
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- ArmOperandConverter g(this, NULL);
+ ArmOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1082,29 +1347,36 @@
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 32-bit ARM we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() {
// On 32-bit ARM we do not insert nops for inlined Smi code.
}
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- // Block literal pool emission for duration of padding.
- v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= v8::internal::Assembler::kInstrSize;
- }
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ // Block literal pool emission for duration of padding.
+ v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
}
}
- MarkLazyDeoptSite();
}
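The padding loop guarantees at least Deoptimizer::patch_size() bytes between consecutive lazy-bailout sites, so that patching a call over one site cannot clobber the code of the next. A worked example with purely illustrative numbers:

    int last_lazy_deopt_pc = 100;       // end of the previous bailout site
    int space_needed = 12;              // hypothetical patch_size(): 3 * kInstrSize
    int current_pc = 104;               // one instruction emitted since then
    int padding_size = 100 + 12 - 104;  // = 8, so two 4-byte nops are emitted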
#undef __
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index ecd0b2d..401100b 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -15,6 +15,7 @@
V(ArmAdd) \
V(ArmAnd) \
V(ArmBic) \
+ V(ArmClz) \
V(ArmCmp) \
V(ArmCmn) \
V(ArmTst) \
@@ -43,6 +44,16 @@
V(ArmUxth) \
V(ArmUxtab) \
V(ArmUxtah) \
+ V(ArmVcmpF32) \
+ V(ArmVaddF32) \
+ V(ArmVsubF32) \
+ V(ArmVmulF32) \
+ V(ArmVmlaF32) \
+ V(ArmVmlsF32) \
+ V(ArmVdivF32) \
+ V(ArmVabsF32) \
+ V(ArmVnegF32) \
+ V(ArmVsqrtF32) \
V(ArmVcmpF64) \
V(ArmVaddF64) \
V(ArmVsubF64) \
@@ -51,18 +62,29 @@
V(ArmVmlsF64) \
V(ArmVdivF64) \
V(ArmVmodF64) \
+ V(ArmVabsF64) \
V(ArmVnegF64) \
V(ArmVsqrtF64) \
- V(ArmVfloorF64) \
- V(ArmVceilF64) \
- V(ArmVroundTruncateF64) \
- V(ArmVroundTiesAwayF64) \
+ V(ArmVrintmF32) \
+ V(ArmVrintmF64) \
+ V(ArmVrintpF32) \
+ V(ArmVrintpF64) \
+ V(ArmVrintzF32) \
+ V(ArmVrintzF64) \
+ V(ArmVrintaF64) \
+ V(ArmVrintnF32) \
+ V(ArmVrintnF64) \
V(ArmVcvtF32F64) \
V(ArmVcvtF64F32) \
V(ArmVcvtF64S32) \
V(ArmVcvtF64U32) \
V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \
+ V(ArmVmovLowU32F64) \
+ V(ArmVmovLowF64U32) \
+ V(ArmVmovHighU32F64) \
+ V(ArmVmovHighF64U32) \
+ V(ArmVmovF64U32U32) \
V(ArmVldrF32) \
V(ArmVstrF32) \
V(ArmVldrF64) \
@@ -76,7 +98,7 @@
V(ArmLdr) \
V(ArmStr) \
V(ArmPush) \
- V(ArmStoreWriteBarrier)
+ V(ArmPoke)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
new file mode 100644
index 0000000..f36802c
--- /dev/null
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -0,0 +1,129 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kArmAdd:
+ case kArmAnd:
+ case kArmBic:
+ case kArmClz:
+ case kArmCmp:
+ case kArmCmn:
+ case kArmTst:
+ case kArmTeq:
+ case kArmOrr:
+ case kArmEor:
+ case kArmSub:
+ case kArmRsb:
+ case kArmMul:
+ case kArmMla:
+ case kArmMls:
+ case kArmSmmul:
+ case kArmSmmla:
+ case kArmUmull:
+ case kArmSdiv:
+ case kArmUdiv:
+ case kArmMov:
+ case kArmMvn:
+ case kArmBfc:
+ case kArmUbfx:
+ case kArmSxtb:
+ case kArmSxth:
+ case kArmSxtab:
+ case kArmSxtah:
+ case kArmUxtb:
+ case kArmUxth:
+ case kArmUxtab:
+ case kArmUxtah:
+ case kArmVcmpF32:
+ case kArmVaddF32:
+ case kArmVsubF32:
+ case kArmVmulF32:
+ case kArmVmlaF32:
+ case kArmVmlsF32:
+ case kArmVdivF32:
+ case kArmVabsF32:
+ case kArmVnegF32:
+ case kArmVsqrtF32:
+ case kArmVcmpF64:
+ case kArmVaddF64:
+ case kArmVsubF64:
+ case kArmVmulF64:
+ case kArmVmlaF64:
+ case kArmVmlsF64:
+ case kArmVdivF64:
+ case kArmVmodF64:
+ case kArmVabsF64:
+ case kArmVnegF64:
+ case kArmVsqrtF64:
+ case kArmVrintmF32:
+ case kArmVrintmF64:
+ case kArmVrintpF32:
+ case kArmVrintpF64:
+ case kArmVrintzF32:
+ case kArmVrintzF64:
+ case kArmVrintaF64:
+ case kArmVrintnF32:
+ case kArmVrintnF64:
+ case kArmVcvtF32F64:
+ case kArmVcvtF64F32:
+ case kArmVcvtF64S32:
+ case kArmVcvtF64U32:
+ case kArmVcvtS32F64:
+ case kArmVcvtU32F64:
+ case kArmVmovLowU32F64:
+ case kArmVmovLowF64U32:
+ case kArmVmovHighU32F64:
+ case kArmVmovHighF64U32:
+ case kArmVmovF64U32U32:
+ return kNoOpcodeFlags;
+
+ case kArmVldrF32:
+ case kArmVldrF64:
+ case kArmLdrb:
+ case kArmLdrsb:
+ case kArmLdrh:
+ case kArmLdrsh:
+ case kArmLdr:
+ return kIsLoadOperation;
+
+ case kArmVstrF32:
+ case kArmVstrF64:
+ case kArmStrb:
+ case kArmStrh:
+ case kArmStr:
+ case kArmPush:
+ case kArmPoke:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture-independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
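These flags are what the scheduler is allowed to exploit: kNoOpcodeFlags instructions move freely, kIsLoadOperation instructions may be reordered relative to one another but not across side effects, and kHasSideEffect instructions keep their relative order. A comment sketch of the key constraint, assuming the operands may alias:

    // str r2, [r3]   ; kHasSideEffect: stays in program order
    // ldr r4, [r3]   ; kIsLoadOperation: must not be hoisted above the
    //                ; str, because the load may read the stored value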
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index ef9e89e..f3deae7 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -59,7 +61,6 @@
case kArmStrb:
case kArmLdr:
case kArmStr:
- case kArmStoreWriteBarrier:
return value >= -4095 && value <= 4095;
case kArmLdrh:
@@ -77,16 +78,14 @@
namespace {
-void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
+void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
ArmOperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
-void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
+void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
ArmOperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
@@ -98,8 +97,8 @@
AddressingMode kImmMode, AddressingMode kRegMode>
bool TryMatchShift(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
- InstructionOperand** value_return,
- InstructionOperand** shift_return) {
+ InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
ArmOperandGenerator g(selector);
if (node->opcode() == kOpcode) {
Int32BinopMatcher m(node);
@@ -118,8 +117,8 @@
bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
- Node* node, InstructionOperand** value_return,
- InstructionOperand** shift_return) {
+ Node* node, InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
return TryMatchShift<IrOpcode::kWord32Ror, 1, 31, kMode_Operand2_R_ROR_I,
kMode_Operand2_R_ROR_R>(selector, opcode_return, node,
value_return, shift_return);
@@ -127,8 +126,8 @@
bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
- Node* node, InstructionOperand** value_return,
- InstructionOperand** shift_return) {
+ Node* node, InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
return TryMatchShift<IrOpcode::kWord32Sar, 1, 32, kMode_Operand2_R_ASR_I,
kMode_Operand2_R_ASR_R>(selector, opcode_return, node,
value_return, shift_return);
@@ -136,8 +135,8 @@
bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
- Node* node, InstructionOperand** value_return,
- InstructionOperand** shift_return) {
+ Node* node, InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
return TryMatchShift<IrOpcode::kWord32Shl, 0, 31, kMode_Operand2_R_LSL_I,
kMode_Operand2_R_LSL_R>(selector, opcode_return, node,
value_return, shift_return);
@@ -145,8 +144,8 @@
bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
- Node* node, InstructionOperand** value_return,
- InstructionOperand** shift_return) {
+ Node* node, InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
return TryMatchShift<IrOpcode::kWord32Shr, 1, 32, kMode_Operand2_R_LSR_I,
kMode_Operand2_R_LSR_R>(selector, opcode_return, node,
value_return, shift_return);
@@ -155,8 +154,8 @@
bool TryMatchShift(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
- InstructionOperand** value_return,
- InstructionOperand** shift_return) {
+ InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
return (
TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
@@ -168,7 +167,7 @@
bool TryMatchImmediateOrShift(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
size_t* input_count_return,
- InstructionOperand** inputs) {
+ InstructionOperand* inputs) {
ArmOperandGenerator g(selector);
if (g.CanBeImmediate(node, *opcode_return)) {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
@@ -189,9 +188,9 @@
FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[5];
+ InstructionOperand inputs[5];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
if (m.left().node() == m.right().node()) {
@@ -202,7 +201,7 @@
// mov r0, r1, asr #16
// adds r0, r0, r1, asr #16
// bvs label
- InstructionOperand* const input = g.UseRegister(m.left().node());
+ InstructionOperand const input = g.UseRegister(m.left().node());
opcode |= AddressingModeField::encode(kMode_Operand2_R);
inputs[input_count++] = input;
inputs[input_count++] = input;
@@ -232,15 +231,14 @@
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -251,36 +249,86 @@
}
+void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
+ ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
+ InstructionOperand result_operand, InstructionOperand left_operand,
+ InstructionOperand right_operand) {
+ ArmOperandGenerator g(selector);
+ if (selector->IsSupported(SUDIV)) {
+ selector->Emit(div_opcode, result_operand, left_operand, right_operand);
+ return;
+ }
+ InstructionOperand left_double_operand = g.TempDoubleRegister();
+ InstructionOperand right_double_operand = g.TempDoubleRegister();
+ InstructionOperand result_double_operand = g.TempDoubleRegister();
+ selector->Emit(f64i32_opcode, left_double_operand, left_operand);
+ selector->Emit(f64i32_opcode, right_double_operand, right_operand);
+ selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
+ right_double_operand);
+ selector->Emit(i32f64_opcode, result_operand, result_double_operand);
+}
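Without SUDIV, EmitDiv synthesizes integer division through double-precision floating point: both operands are converted to f64, divided with vdiv, and the quotient is truncated back to an int32. The round trip is exact because every int32 is representable in a double and the correctly rounded quotient of two 32-bit integers never rounds across an integer boundary. A C++ sketch of the fallback (DivViaDouble is a hypothetical name):

    int32_t DivViaDouble(int32_t left, int32_t right) {  // assumes right != 0
      double l = static_cast<double>(left);   // kArmVcvtF64S32
      double r = static_cast<double>(right);  // kArmVcvtF64S32
      return static_cast<int32_t>(l / r);     // kArmVdivF64 + kArmVcvtS32F64
    }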
+
+
+void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
+ ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
+ ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand div_operand = g.TempRegister();
+ InstructionOperand result_operand = g.DefineAsRegister(node);
+ InstructionOperand left_operand = g.UseRegister(m.left().node());
+ InstructionOperand right_operand = g.UseRegister(m.right().node());
+ EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
+ left_operand, right_operand);
+ if (selector->IsSupported(MLS)) {
+ selector->Emit(kArmMls, result_operand, div_operand, right_operand,
+ left_operand);
+ } else {
+ InstructionOperand mul_operand = g.TempRegister();
+ selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
+ selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
+ }
+}
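VisitMod then recovers the remainder from that quotient: mls computes left - div * right in a single instruction, and the mul + sub pair is the fallback for cores without MLS. For example, 7 mod 3 gives div = 2 and remainder = 7 - 2 * 3 = 1. The same computation as a C++ sketch (Mod is a hypothetical name):

    int32_t Mod(int32_t left, int32_t right) {  // assumes right != 0
      int32_t div = left / right;  // EmitDiv: sdiv, or the FP fallback above
      return left - div * right;   // kArmMls, or kArmMul followed by kArmSub
    }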
+
} // namespace
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kArmVldrF32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kArmVldrF64;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeUint32 ? kArmLdrb : kArmLdrsb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kArmLdrb : kArmLdrsb;
break;
- case kRepWord16:
- opcode = typ == kTypeUint32 ? kArmLdrh : kArmLdrsh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kArmLdrh : kArmLdrsh;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kArmLdr;
break;
- default:
+ case MachineRepresentation::kNone: // Fall through.
+ case MachineRepresentation::kWord64:
UNREACHABLE();
return;
}
@@ -301,87 +349,110 @@
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
- Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
- g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps),
- temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kArmVstrF32;
- break;
- case kRepFloat64:
- opcode = kArmVstrF64;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kArmStrb;
- break;
- case kRepWord16:
- opcode = kArmStrh;
- break;
- case kRepTagged: // Fall through.
- case kRepWord32:
- opcode = kArmStr;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
- g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kArmVstrF32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kArmVstrF64;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kArmStrb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kArmStrh;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kArmStr;
+ break;
+ case MachineRepresentation::kNone: // Fall through.
+ case MachineRepresentation::kWord64:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ }
}
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.UseRegister(offset);
- InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
- ? g.UseImmediate(length)
- : g.UseRegister(length);
+ InstructionOperand offset_operand = g.UseRegister(offset);
+ InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
g.DefineAsRegister(node), offset_operand, length_operand,
g.UseRegister(buffer), offset_operand);
@@ -389,38 +460,41 @@
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.UseRegister(offset);
- InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
- ? g.UseImmediate(length)
- : g.UseRegister(length);
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), nullptr,
+ InstructionOperand offset_operand = g.UseRegister(offset);
+ InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length);
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
offset_operand, length_operand, g.UseRegister(value),
g.UseRegister(buffer), offset_operand);
}
@@ -432,8 +506,8 @@
Node* right) {
ArmOperandGenerator g(selector);
InstructionCode opcode = kArmBic;
- InstructionOperand* value_operand;
- InstructionOperand* shift_operand;
+ InstructionOperand value_operand;
+ InstructionOperand shift_operand;
if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
value_operand, shift_operand);
@@ -447,8 +521,8 @@
void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
uint32_t lsb, uint32_t width) {
- DCHECK_LE(1, width);
- DCHECK_LE(width, 32 - lsb);
+ DCHECK_LE(1u, width);
+ DCHECK_LE(width, 32u - lsb);
ArmOperandGenerator g(selector);
selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left),
g.TempImmediate(lsb), g.TempImmediate(width));
@@ -480,7 +554,7 @@
uint32_t msb = base::bits::CountLeadingZeros32(value);
// Try to interpret this AND as UBFX.
if (IsSupported(ARMv7) && width != 0 && msb + width == 32) {
- DCHECK_EQ(0, base::bits::CountTrailingZeros32(value));
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
if (m.left().IsWord32Shr()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 31)) {
@@ -534,8 +608,8 @@
Int32BinopMatcher m(node);
if (m.right().Is(-1)) {
InstructionCode opcode = kArmMvn;
- InstructionOperand* value_operand;
- InstructionOperand* shift_operand;
+ InstructionOperand value_operand;
+ InstructionOperand shift_operand;
if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
&shift_operand)) {
Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
@@ -549,15 +623,16 @@
}
+namespace {
+
template <typename TryMatchShift>
-static inline void VisitShift(InstructionSelector* selector, Node* node,
- TryMatchShift try_match_shift,
- FlagsContinuation* cont) {
+void VisitShift(InstructionSelector* selector, Node* node,
+ TryMatchShift try_match_shift, FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
InstructionCode opcode = kArmMov;
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 2;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));
@@ -572,25 +647,26 @@
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
template <typename TryMatchShift>
-static inline void VisitShift(InstructionSelector* selector, Node* node,
+void VisitShift(InstructionSelector* selector, Node* node,
TryMatchShift try_match_shift) {
FlagsContinuation cont;
VisitShift(selector, node, try_match_shift, &cont);
}
+} // namespace
+
void InstructionSelector::VisitWord32Shl(Node* node) {
VisitShift(this, node, TryMatchLSL);
@@ -602,7 +678,7 @@
Int32BinopMatcher m(node);
if (IsSupported(ARMv7) && m.left().IsWord32And() &&
m.right().IsInRange(0, 31)) {
- int32_t lsb = m.right().Value();
+ uint32_t lsb = m.right().Value();
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
uint32_t value = (mleft.right().Value() >> lsb) << lsb;
@@ -642,6 +718,17 @@
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ VisitRR(this, kArmClz, node);
+}
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -791,59 +878,24 @@
return;
}
}
- Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.UseRegister(m.right().node()));
+ VisitRRR(this, kArmMul, node);
}
void InstructionSelector::VisitInt32MulHigh(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmSmmul, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
+ VisitRRR(this, kArmSmmul, node);
}
void InstructionSelector::VisitUint32MulHigh(Node* node) {
ArmOperandGenerator g(this);
- InstructionOperand* outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
- InstructionOperand* inputs[] = {g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1))};
+ InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1))};
Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
-static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
- ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
- InstructionOperand* result_operand,
- InstructionOperand* left_operand,
- InstructionOperand* right_operand) {
- ArmOperandGenerator g(selector);
- if (selector->IsSupported(SUDIV)) {
- selector->Emit(div_opcode, result_operand, left_operand, right_operand);
- return;
- }
- InstructionOperand* left_double_operand = g.TempDoubleRegister();
- InstructionOperand* right_double_operand = g.TempDoubleRegister();
- InstructionOperand* result_double_operand = g.TempDoubleRegister();
- selector->Emit(f64i32_opcode, left_double_operand, left_operand);
- selector->Emit(f64i32_opcode, right_double_operand, right_operand);
- selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
- right_double_operand);
- selector->Emit(i32f64_opcode, result_operand, result_double_operand);
-}
-
-
-static void VisitDiv(InstructionSelector* selector, Node* node,
- ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
- ArchOpcode i32f64_opcode) {
- ArmOperandGenerator g(selector);
- Int32BinopMatcher m(node);
- EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
- g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.UseRegister(m.right().node()));
-}
-
-
void InstructionSelector::VisitInt32Div(Node* node) {
VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}
@@ -854,28 +906,6 @@
}
-static void VisitMod(InstructionSelector* selector, Node* node,
- ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
- ArchOpcode i32f64_opcode) {
- ArmOperandGenerator g(selector);
- Int32BinopMatcher m(node);
- InstructionOperand* div_operand = g.TempRegister();
- InstructionOperand* result_operand = g.DefineAsRegister(node);
- InstructionOperand* left_operand = g.UseRegister(m.left().node());
- InstructionOperand* right_operand = g.UseRegister(m.right().node());
- EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
- left_operand, right_operand);
- if (selector->IsSupported(MLS)) {
- selector->Emit(kArmMls, result_operand, div_operand, right_operand,
- left_operand);
- return;
- }
- InstructionOperand* mul_operand = g.TempRegister();
- selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
- selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
-}
-
-
void InstructionSelector::VisitInt32Mod(Node* node) {
VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}
@@ -887,47 +917,80 @@
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmVcvtF64F32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArmVcvtF64F32, node);
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmVcvtF64S32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArmVcvtF64S32, node);
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmVcvtF64U32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArmVcvtF64U32, node);
}
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArmVcvtS32F64, node);
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmVcvtU32F64, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArmVcvtU32F64, node);
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ VisitRR(this, kArmVcvtF32F64, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kArmVcvtS32F64, node);
+ }
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kArmVmovLowU32F64, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
ArmOperandGenerator g(this);
- Emit(kArmVcvtF32F64, g.DefineAsRegister(node),
+ Emit(kArmVmovLowF64U32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE, 0),
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ ArmOperandGenerator g(this);
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+ Float32BinopMatcher mleft(m.left().node());
+ Emit(kArmVmlaF32, g.DefineSameAsFirst(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+ Float32BinopMatcher mright(m.right().node());
+ Emit(kArmVmlaF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ VisitRRR(this, kArmVaddF32, node);
+}
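When a float32 multiply feeds either addend and has no other uses (CanCover), the add is fused into a single multiply-accumulate; DefineSameAsFirst is needed because vmla reads and writes its accumulator. VFP's vmla is a chained rather than fused multiply-add (it rounds after the multiply and again after the add), so the result is bit-identical to the vmul + vadd pair it replaces:

    // a + b * c  and  b * c + a  both lower to a single instruction:
    //   vmla.f32 Sa, Sb, Sc   ; Sa += Sb * Sc, and Sa holds the result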
+
+
void InstructionSelector::VisitFloat64Add(Node* node) {
ArmOperandGenerator g(this);
Float64BinopMatcher m(node);
@@ -945,7 +1008,26 @@
g.UseRegister(mright.right().node()));
return;
}
- VisitRRRFloat64(this, kArmVaddF64, node);
+ VisitRRR(this, kArmVaddF64, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ ArmOperandGenerator g(this);
+ Float32BinopMatcher m(node);
+ if (m.left().IsMinusZero()) {
+ Emit(kArmVnegF32, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()));
+ return;
+ }
+ if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+ Float32BinopMatcher mright(m.right().node());
+ Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ VisitRRR(this, kArmVsubF32, node);
}
@@ -953,6 +1035,18 @@
ArmOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero()) {
+ if (m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kArmVrintpF64, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
Emit(kArmVnegF64, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
return;
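The nested match above turns -0 - Float64RoundDown(-0 - x) into a single vrintp (round toward +infinity), using the identity ceil(x) == -floor(-x). Subtracting from -0 rather than 0 is what makes each subtraction an exact IEEE negation, including for signed zeros:

    // ceil(2.3)  == -floor(-2.3) == -(-3.0) == 3.0
    // ceil(-0.0) == -floor(+0.0) == -0.0, while 0 - (+0.0) would give +0.0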
@@ -964,17 +1058,27 @@
g.UseRegister(mright.right().node()));
return;
}
- VisitRRRFloat64(this, kArmVsubF64, node);
+ VisitRRR(this, kArmVsubF64, node);
+}
+
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kArmVmulF32, node);
}
void InstructionSelector::VisitFloat64Mul(Node* node) {
- VisitRRRFloat64(this, kArmVmulF64, node);
+ VisitRRR(this, kArmVmulF64, node);
+}
+
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kArmVdivF32, node);
}
void InstructionSelector::VisitFloat64Div(Node* node) {
- VisitRRRFloat64(this, kArmVdivF64, node);
+ VisitRRR(this, kArmVdivF64, node);
}
@@ -985,104 +1089,169 @@
}
+void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kArmVabsF32, node);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kArmVabsF64, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kArmVsqrtF32, node);
+}
+
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArmVsqrtF64, node);
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRRFloat64(this, kArmVfloorF64, node);
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kArmVrintmF32, node);
}
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRRFloat64(this, kArmVceilF64, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kArmVrintmF64, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kArmVrintpF32, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kArmVrintpF64, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kArmVrintzF32, node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRRFloat64(this, kArmVroundTruncateF64, node);
+ VisitRR(this, kArmVrintzF64, node);
}
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRRFloat64(this, kArmVroundTiesAwayF64, node);
+ VisitRR(this, kArmVrintaF64, node);
}
-void InstructionSelector::VisitCall(Node* node) {
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kArmVrintnF32, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kArmVrintnF64, node);
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
ArmOperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- FrameStateDescriptor* frame_state_descriptor = NULL;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM64 it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // TODO(dcarney): might be possible to use claim/poke instead
- // Push any stack arguments.
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
- Emit(kArmPush, NULL, g.UseRegister(*input));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
+ // Poke any stack arguments.
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
+ int slot = static_cast<int>(n);
+ Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
+ g.UseRegister(input.node()));
+ }
}
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
+ } else {
+ // Push any stack arguments.
+ for (PushParameter input : base::Reversed(*arguments)) {
+ // Skip any alignment holes in pushed nodes.
+ if (input.node() == nullptr) continue;
+ Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node()));
+ }
}
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- InstructionOperand** first_output =
- buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
- Instruction* call_instr =
- Emit(opcode, buffer.outputs.size(), first_output,
- buffer.instruction_args.size(), &buffer.instruction_args.front());
- call_instr->MarkAsCall();
}
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+
namespace {
-// Shared routine for multiple float compare operations.
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ ArmOperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ }
+}
+
+
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ ArmOperandGenerator g(selector);
+ Float32BinopMatcher m(node);
+ if (m.right().Is(0.0f)) {
+ VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()), cont);
+ } else if (m.left().Is(0.0f)) {
+ cont->Commute();
+ VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.right().node()),
+ g.UseImmediate(m.left().node()), cont);
+ } else {
+ VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()), cont);
+ }
+}
+
+
+// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Float64BinopMatcher m(node);
- if (cont->IsBranch()) {
- selector->Emit(cont->Encode(kArmVcmpF64), nullptr,
- g.UseRegister(m.left().node()),
- g.UseRegister(m.right().node()), g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ if (m.right().Is(0.0)) {
+ VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()), cont);
+ } else if (m.left().Is(0.0)) {
+ cont->Commute();
+ VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.right().node()),
+ g.UseImmediate(m.left().node()), cont);
} else {
- DCHECK(cont->IsSet());
- selector->Emit(
- cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
- g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+ VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()), cont);
}
}
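
Note: both compare helpers above special-case zero because #0.0 is the only immediate the ARM vcmp instruction accepts; when the zero sits on the left, cont->Commute() mirrors the condition so the operands can be swapped into the immediate form.
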
@@ -1092,9 +1261,9 @@
InstructionCode opcode, FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[5];
+ InstructionOperand inputs[5];
size_t input_count = 0;
- InstructionOperand* outputs[1];
+ InstructionOperand outputs[1];
size_t output_count = 0;
if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
@@ -1120,13 +1289,12 @@
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
+ DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -1166,26 +1334,35 @@
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kFloatLessThan);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
+ return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
- cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont->OverwriteAndNegateIfEqual(kFloatLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
- Node* const result = node->FindProjection(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
if (!result || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
@@ -1228,11 +1405,10 @@
ArmOperandGenerator g(selector);
InstructionCode const opcode =
cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
- InstructionOperand* const value_operand = g.UseRegister(value);
+ InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
- selector->Emit(opcode, nullptr, value_operand, value_operand,
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
value_operand);
@@ -1249,6 +1425,34 @@
}
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ ArmOperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
+ index_operand, value_operand, g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
+}
+
+
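Note: the constants in VisitSwitch above weight code size against dispatch time (space plus three times time), with an extra guard that min_value stays above INT32_MIN so the index bias subtraction cannot overflow. The same decision, as a standalone sketch:

  #include <cstddef>

  // Mirrors the table-vs-lookup choice made in VisitSwitch above.
  bool PreferTableSwitch(size_t value_range, size_t case_count) {
    size_t table_space_cost = 4 + value_range;      // one entry per value in range
    size_t table_time_cost = 3;                     // bounds check + indexed branch
    size_t lookup_space_cost = 3 + 2 * case_count;  // cmp/branch pair per case
    size_t lookup_time_cost = case_count;           // linear scan of the cases
    return case_count > 0 &&
           table_space_cost + 3 * table_time_cost <=
               lookup_space_cost + 3 * lookup_time_cost;
  }

For example, four consecutive cases (value_range == 4) give 4 + 4 + 9 = 17 against 3 + 8 + 12 = 23, so the dense table wins.
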
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
@@ -1284,7 +1488,7 @@
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
}
@@ -1294,7 +1498,7 @@
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
}
@@ -1303,36 +1507,100 @@
}
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont(kFloatLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kFloatLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kFloatLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kFloatLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ VisitRR(this, kArmVmovLowU32F64, node);
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ VisitRR(this, kArmVmovHighU32F64, node);
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
+ CanCover(node, left)) {
+ left = left->InputAt(1);
+ Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(right),
+ g.UseRegister(left));
+ return;
+ }
+ Emit(kArmVmovLowF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
+ CanCover(node, left)) {
+ left = left->InputAt(1);
+ Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(left),
+ g.UseRegister(right));
+ return;
+ }
+ Emit(kArmVmovHighF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseRegister(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe;
-
if (CpuFeatures::IsSupported(ARMv8)) {
- flags |= MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
+ flags |= MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
- MachineOperatorBuilder::kFloat64RoundTiesAway;
+ MachineOperatorBuilder::kFloat64RoundTiesAway |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
}
return flags;
}
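
Note: the rounding flags above remain gated on ARMv8 because the vrintm/vrintp/vrintz/vrinta/vrintn instructions they map to were introduced with ARMv8; on older cores the corresponding machine operators are simply not advertised.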
diff --git a/src/compiler/arm/linkage-arm.cc b/src/compiler/arm/linkage-arm.cc
deleted file mode 100644
index 3fca76f..0000000
--- a/src/compiler/arm/linkage-arm.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct ArmLinkageHelperTraits {
- static Register ReturnValueReg() { return r0; }
- static Register ReturnValue2Reg() { return r1; }
- static Register JSCallFunctionReg() { return r1; }
- static Register ContextReg() { return cp; }
- static Register RuntimeCallFunctionReg() { return r1; }
- static Register RuntimeCallArgCountReg() { return r0; }
- static RegList CCalleeSaveRegisters() {
- return r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() |
- r10.bit();
- }
- static Register CRegisterParameter(int i) {
- static Register register_parameters[] = {r0, r1, r2, r3};
- return register_parameters[i];
- }
- static int CRegisterParametersLength() { return 4; }
-};
-
-
-typedef LinkageHelper<ArmLinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index e025236..d356195 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -4,12 +4,13 @@
#include "src/compiler/code-generator.h"
+#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/scopes.h"
+#include "src/compiler/osr.h"
namespace v8 {
namespace internal {
@@ -19,38 +20,60 @@
// Adds Arm64-specific methods to convert InstructionOperands.
-class Arm64OperandConverter FINAL : public InstructionOperandConverter {
+class Arm64OperandConverter final : public InstructionOperandConverter {
public:
Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- DoubleRegister InputFloat32Register(int index) {
+ DoubleRegister InputFloat32Register(size_t index) {
return InputDoubleRegister(index).S();
}
- DoubleRegister InputFloat64Register(int index) {
+ DoubleRegister InputFloat64Register(size_t index) {
return InputDoubleRegister(index);
}
+ size_t OutputCount() { return instr_->OutputCount(); }
+
DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
- Register InputRegister32(int index) {
+ Register InputRegister32(size_t index) {
return ToRegister(instr_->InputAt(index)).W();
}
- Register InputRegister64(int index) { return InputRegister(index); }
+ Register InputOrZeroRegister32(size_t index) {
+ DCHECK(instr_->InputAt(index)->IsRegister() ||
+ (instr_->InputAt(index)->IsImmediate() && (InputInt32(index) == 0)));
+ if (instr_->InputAt(index)->IsImmediate()) {
+ return wzr;
+ }
+ return InputRegister32(index);
+ }
- Operand InputImmediate(int index) {
+ Register InputRegister64(size_t index) { return InputRegister(index); }
+
+ Register InputOrZeroRegister64(size_t index) {
+ DCHECK(instr_->InputAt(index)->IsRegister() ||
+ (instr_->InputAt(index)->IsImmediate() && (InputInt64(index) == 0)));
+ if (instr_->InputAt(index)->IsImmediate()) {
+ return xzr;
+ }
+ return InputRegister64(index);
+ }
+
+ Operand InputImmediate(size_t index) {
return ToImmediate(instr_->InputAt(index));
}
- Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+ Operand InputOperand(size_t index) {
+ return ToOperand(instr_->InputAt(index));
+ }
- Operand InputOperand64(int index) { return InputOperand(index); }
+ Operand InputOperand64(size_t index) { return InputOperand(index); }
- Operand InputOperand32(int index) {
+ Operand InputOperand32(size_t index) {
return ToOperand32(instr_->InputAt(index));
}
@@ -58,7 +81,7 @@
Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
- Operand InputOperand2_32(int index) {
+ Operand InputOperand2_32(size_t index) {
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
return InputOperand32(index);
@@ -70,6 +93,14 @@
return Operand(InputRegister32(index), ASR, InputInt5(index + 1));
case kMode_Operand2_R_ROR_I:
return Operand(InputRegister32(index), ROR, InputInt5(index + 1));
+ case kMode_Operand2_R_UXTB:
+ return Operand(InputRegister32(index), UXTB);
+ case kMode_Operand2_R_UXTH:
+ return Operand(InputRegister32(index), UXTH);
+ case kMode_Operand2_R_SXTB:
+ return Operand(InputRegister32(index), SXTB);
+ case kMode_Operand2_R_SXTH:
+ return Operand(InputRegister32(index), SXTH);
case kMode_MRI:
case kMode_MRR:
break;
@@ -78,7 +109,7 @@
return Operand(-1);
}
- Operand InputOperand2_64(int index) {
+ Operand InputOperand2_64(size_t index) {
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
return InputOperand64(index);
@@ -90,6 +121,14 @@
return Operand(InputRegister64(index), ASR, InputInt6(index + 1));
case kMode_Operand2_R_ROR_I:
return Operand(InputRegister64(index), ROR, InputInt6(index + 1));
+ case kMode_Operand2_R_UXTB:
+ return Operand(InputRegister64(index), UXTB);
+ case kMode_Operand2_R_UXTH:
+ return Operand(InputRegister64(index), UXTH);
+ case kMode_Operand2_R_SXTB:
+ return Operand(InputRegister64(index), SXTB);
+ case kMode_Operand2_R_SXTH:
+ return Operand(InputRegister64(index), SXTH);
case kMode_MRI:
case kMode_MRR:
break;
@@ -98,14 +137,18 @@
return Operand(-1);
}
- MemOperand MemoryOperand(int* first_index) {
- const int index = *first_index;
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
case kMode_Operand2_R_LSL_I:
case kMode_Operand2_R_LSR_I:
case kMode_Operand2_R_ASR_I:
case kMode_Operand2_R_ROR_I:
+ case kMode_Operand2_R_UXTB:
+ case kMode_Operand2_R_UXTH:
+ case kMode_Operand2_R_SXTB:
+ case kMode_Operand2_R_SXTH:
break;
case kMode_MRI:
*first_index += 2;
@@ -118,7 +161,7 @@
return MemOperand(no_reg);
}
- MemOperand MemoryOperand(int first_index = 0) {
+ MemOperand MemoryOperand(size_t first_index = 0) {
return MemoryOperand(&first_index);
}
@@ -162,12 +205,21 @@
}
MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
- DCHECK(op != NULL);
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
+ if (offset.from_frame_pointer()) {
+ int from_sp =
+ offset.offset() +
+ ((frame()->GetSpToFpSlotCount() + frame_access_state()->sp_delta()) *
+ kPointerSize);
+ // Convert FP-offsets to SP-offsets if it results in better code.
+ if (Assembler::IsImmLSUnscaled(from_sp) ||
+ Assembler::IsImmLSScaled(from_sp, LSDoubleWord)) {
+ offset = FrameOffset::FromStackPointer(from_sp);
+ }
+ }
return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
offset.offset());
}
@@ -176,12 +228,12 @@
namespace {
-class OutOfLineLoadNaN32 FINAL : public OutOfLineCode {
+class OutOfLineLoadNaN32 final : public OutOfLineCode {
public:
OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
__ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
}
@@ -190,12 +242,12 @@
};
-class OutOfLineLoadNaN64 FINAL : public OutOfLineCode {
+class OutOfLineLoadNaN64 final : public OutOfLineCode {
public:
OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
__ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
}
@@ -204,17 +256,110 @@
};
-class OutOfLineLoadZero FINAL : public OutOfLineCode {
+class OutOfLineLoadZero final : public OutOfLineCode {
public:
OutOfLineLoadZero(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL { __ Mov(result_, 0); }
+ void Generate() final { __ Mov(result_, 0); }
private:
Register const result_;
};
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlagClear(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+ // and restore lr properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ Add(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
+
+Condition FlagsConditionToCondition(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kFloatLessThanOrUnordered:
+ return lt;
+ case kFloatGreaterThanOrEqual:
+ return ge;
+ case kFloatLessThanOrEqual:
+ return ls;
+ case kFloatGreaterThanOrUnordered:
+ return hi;
+ case kFloatLessThan:
+ return lo;
+ case kFloatGreaterThanOrEqualOrUnordered:
+ return hs;
+ case kFloatLessThanOrEqualOrUnordered:
+ return le;
+ case kFloatGreaterThan:
+ return gt;
+ case kOverflow:
+ return vs;
+ case kNotOverflow:
+ return vc;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ }
+ UNREACHABLE();
+ return nv;
+}
+
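A note on the float rows above: AArch64 fcmp sets NZCV to 0011 when either operand is NaN, so the unsigned conditions double as float conditions with the desired NaN behavior; for example kFloatLessThan maps to lo, which fails on NaN, while kFloatLessThanOrUnordered maps to lt, which passes. kUnorderedEqual and kUnorderedNotEqual have no single-condition encoding, hence their fall-through to UNREACHABLE.
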
} // namespace
@@ -246,6 +391,20 @@
} while (0)
+#define ASSEMBLE_CHECKED_LOAD_INTEGER_64(asm_instr) \
+ do { \
+ auto result = i.OutputRegister(); \
+ auto buffer = i.InputRegister(0); \
+ auto offset = i.InputRegister32(1); \
+ auto length = i.InputOperand32(2); \
+ __ Cmp(offset, length); \
+ auto ool = new (zone()) OutOfLineLoadZero(this, result); \
+ __ B(hs, ool->entry()); \
+ __ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
+ __ Bind(ool->exit()); \
+ } while (0)
+
+
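Note: the 64-bit checked-load macro above folds both failure modes into one unsigned comparison: a negative offset reinterprets as a huge unsigned value, so the hs branch catches it along with offsets past length, and the out-of-line path materializes zero. In plain C++ terms (a sketch with assumed names, not the macro itself):

  #include <cstdint>
  #include <cstring>

  int64_t CheckedLoadWord64(const uint8_t* buffer, uint32_t offset,
                            uint32_t length) {
    // Single unsigned compare: out-of-range (including "negative") offsets
    // take the out-of-line path and produce zero.
    if (offset >= length) return 0;
    int64_t result;
    // Byte offset, as in MemOperand(buffer, offset, UXTW) above.
    std::memcpy(&result, buffer + offset, sizeof result);
    return result;
  }
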
#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
do { \
auto buffer = i.InputRegister(0); \
@@ -274,18 +433,57 @@
} while (0)
-#define ASSEMBLE_SHIFT(asm_instr, width) \
- do { \
- if (instr->InputAt(1)->IsRegister()) { \
- __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
- i.InputRegister##width(1)); \
- } else { \
- int64_t imm = i.InputOperand##width(1).immediate().value(); \
- __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
- } \
+#define ASSEMBLE_CHECKED_STORE_INTEGER_64(asm_instr) \
+ do { \
+ auto buffer = i.InputRegister(0); \
+ auto offset = i.InputRegister32(1); \
+ auto length = i.InputOperand32(2); \
+ auto value = i.InputRegister(3); \
+ __ Cmp(offset, length); \
+ Label done; \
+ __ B(hs, &done); \
+ __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
+ __ Bind(&done); \
} while (0)
+#define ASSEMBLE_SHIFT(asm_instr, width) \
+ do { \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
+ i.InputRegister##width(1)); \
+ } else { \
+ uint32_t imm = \
+ static_cast<uint32_t>(i.InputOperand##width(1).ImmediateValue()); \
+ __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
+ imm % (width)); \
+ } \
+ } while (0)
+
+
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ Drop(sp_slot_delta);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Claim(-sp_slot_delta);
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
+
+
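Note: taken together, the two tail-call helpers above hand the current frame to the callee: AssembleDeconstructActivationRecord drops surplus argument slots, AssemblePrepareTailCall claims any extra slots needed and reloads the caller's fp and lr, and frame_access_state() switches to SP-relative addressing while fp is in flux.
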
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Arm64OperandConverter i(this, instr);
@@ -301,7 +499,22 @@
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Call(target);
}
- AddSafepointAndDeopt(instr);
+ frame_access_state()->ClearSPDelta();
+ RecordCallPosition(instr);
+ break;
+ }
+ case kArchTailCallCodeObject: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ } else {
+ Register target = i.InputRegister(0);
+ __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(target);
+ }
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -317,59 +530,165 @@
}
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(x10);
- AddSafepointAndDeopt(instr);
+ frame_access_state()->ClearSPDelta();
+ RecordCallPosition(instr);
+ break;
+ }
+ case kArchTailCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireX();
+ __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ cmp(cp, temp);
+ __ Assert(eq, kWrongFunctionContext);
+ }
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Jump(x10);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
+ case kArchPrepareCallCFunction:
+ // We don't need kArchPrepareCallCFunction on arm64 as the instruction
+      // selector already performs a Claim to reserve space on the stack and
+      // guarantees correct alignment of the stack pointer.
+ UNREACHABLE();
+ break;
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters, 0);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters, 0);
+ }
+ // CallCFunction only supports register arguments so we never need to call
+ // frame()->ClearOutgoingParameterSlots() here.
+ DCHECK(frame_access_state()->sp_delta() == 0);
break;
}
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ break;
+ }
case kArchRet:
AssembleReturn();
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), masm()->StackPointer());
break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
- case kArm64Float64Ceil:
- __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ Str(value, MemOperand(object, index));
+ __ CheckPageFlagSet(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ ool->entry());
+ __ Bind(ool->exit());
break;
- case kArm64Float64Floor:
+ }
+ case kArm64Float32RoundDown:
+ __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArm64Float64RoundDown:
__ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kArm64Float64RoundTruncate:
- __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ case kArm64Float32RoundUp:
+ __ Frintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArm64Float64RoundUp:
+ __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArm64Float64RoundTiesAway:
__ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kArm64Float32RoundTruncate:
+ __ Frintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArm64Float64RoundTruncate:
+ __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kArm64Float32RoundTiesEven:
+ __ Frintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArm64Float64RoundTiesEven:
+ __ Frintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64Add:
- __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ __ Adds(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
+ } else {
+ __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
+ }
break;
case kArm64Add32:
if (FlagsModeField::decode(opcode) != kFlags_none) {
- __ Adds(i.OutputRegister32(), i.InputRegister32(0),
+ __ Adds(i.OutputRegister32(), i.InputOrZeroRegister32(0),
i.InputOperand2_32(1));
} else {
- __ Add(i.OutputRegister32(), i.InputRegister32(0),
+ __ Add(i.OutputRegister32(), i.InputOrZeroRegister32(0),
i.InputOperand2_32(1));
}
break;
case kArm64And:
- __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64And32:
- __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Bic:
- __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Bic(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Bic32:
- __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Bic(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Mul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -447,52 +766,59 @@
i.InputRegister32(0));
break;
}
- // TODO(dcarney): use mvn instr??
case kArm64Not:
- __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
+ __ Mvn(i.OutputRegister(), i.InputOperand(0));
break;
case kArm64Not32:
- __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
- break;
- case kArm64Neg:
- __ Neg(i.OutputRegister(), i.InputOperand(0));
- break;
- case kArm64Neg32:
- __ Neg(i.OutputRegister32(), i.InputOperand32(0));
+ __ Mvn(i.OutputRegister32(), i.InputOperand32(0));
break;
case kArm64Or:
- __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Orr(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Or32:
- __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Orr(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Orn:
- __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Orn(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Orn32:
- __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Orn(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Eor:
- __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Eor(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Eor32:
- __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Eor(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Eon:
- __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Eon(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Eon32:
- __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Eon(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Sub:
- __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ __ Subs(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
+ } else {
+ __ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
+ }
break;
case kArm64Sub32:
if (FlagsModeField::decode(opcode) != kFlags_none) {
- __ Subs(i.OutputRegister32(), i.InputRegister32(0),
+ __ Subs(i.OutputRegister32(), i.InputOrZeroRegister32(0),
i.InputOperand2_32(1));
} else {
- __ Sub(i.OutputRegister32(), i.InputRegister32(0),
+ __ Sub(i.OutputRegister32(), i.InputOrZeroRegister32(0),
i.InputOperand2_32(1));
}
break;
@@ -532,13 +858,25 @@
case kArm64Sxtw:
__ Sxtw(i.OutputRegister(), i.InputRegister32(0));
break;
+ case kArm64Sbfx32:
+ __ Sbfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
+ i.InputInt5(2));
+ break;
case kArm64Ubfx:
- __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
- i.InputInt8(2));
+ __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1),
+ i.InputInt6(2));
break;
case kArm64Ubfx32:
- __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
- i.InputInt8(2));
+ __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
+ i.InputInt5(2));
+ break;
+ case kArm64Ubfiz32:
+ __ Ubfiz(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
+ i.InputInt5(2));
+ break;
+ case kArm64Bfi:
+ __ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2),
+ i.InputInt6(3));
break;
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
@@ -547,39 +885,48 @@
case kArm64CompareAndBranch32:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
- case kArm64Claim: {
- int words = MiscField::decode(instr->opcode());
- __ Claim(words);
+ case kArm64ClaimForCallArguments: {
+ __ Claim(i.InputInt32(0));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0));
break;
}
case kArm64Poke: {
- int slot = MiscField::decode(instr->opcode());
- Operand operand(slot * kPointerSize);
- __ Poke(i.InputRegister(0), operand);
- break;
- }
- case kArm64PokePairZero: {
- // TODO(dcarney): test slot offset and register order.
- int slot = MiscField::decode(instr->opcode()) - 1;
- __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
+ Operand operand(i.InputInt32(1) * kPointerSize);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Poke(i.InputFloat64Register(0), operand);
+ } else {
+ __ Poke(i.InputRegister(0), operand);
+ }
break;
}
case kArm64PokePair: {
- int slot = MiscField::decode(instr->opcode()) - 1;
- __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
+ int slot = i.InputInt32(2) - 1;
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
+ slot * kPointerSize);
+ } else {
+ __ PokePair(i.InputRegister(1), i.InputRegister(0),
+ slot * kPointerSize);
+ }
break;
}
+ case kArm64Clz:
+ __ Clz(i.OutputRegister64(), i.InputRegister64(0));
+ break;
+ case kArm64Clz32:
+ __ Clz(i.OutputRegister32(), i.InputRegister32(0));
+ break;
case kArm64Cmp:
- __ Cmp(i.InputRegister(0), i.InputOperand(1));
+ __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand(1));
break;
case kArm64Cmp32:
- __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
+ __ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Cmn:
- __ Cmn(i.InputRegister(0), i.InputOperand(1));
+ __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand(1));
break;
case kArm64Cmn32:
- __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
+ __ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Tst:
__ Tst(i.InputRegister(0), i.InputOperand(1));
@@ -587,8 +934,59 @@
case kArm64Tst32:
__ Tst(i.InputRegister32(0), i.InputOperand32(1));
break;
+ case kArm64Float32Cmp:
+ if (instr->InputAt(1)->IsDoubleRegister()) {
+ __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
+ } else {
+ DCHECK(instr->InputAt(1)->IsImmediate());
+ // 0.0 is the only immediate supported by fcmp instructions.
+ DCHECK(i.InputFloat32(1) == 0.0f);
+ __ Fcmp(i.InputFloat32Register(0), i.InputFloat32(1));
+ }
+ break;
+ case kArm64Float32Add:
+ __ Fadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ break;
+ case kArm64Float32Sub:
+ __ Fsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ break;
+ case kArm64Float32Mul:
+ __ Fmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ break;
+ case kArm64Float32Div:
+ __ Fdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ break;
+ case kArm64Float32Max:
+ // (b < a) ? a : b
+ __ Fcmp(i.InputFloat32Register(1), i.InputFloat32Register(0));
+ __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1), lo);
+ break;
+ case kArm64Float32Min:
+ // (a < b) ? a : b
+ __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
+ __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1), lo);
+ break;
+ case kArm64Float32Abs:
+ __ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArm64Float32Sqrt:
+ __ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArm64Float64Cmp:
- __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ if (instr->InputAt(1)->IsDoubleRegister()) {
+ __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ DCHECK(instr->InputAt(1)->IsImmediate());
+ // 0.0 is the only immediate supported by fcmp instructions.
+ DCHECK(i.InputDouble(1) == 0.0);
+ __ Fcmp(i.InputDoubleRegister(0), i.InputDouble(1));
+ }
break;
case kArm64Float64Add:
__ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -617,6 +1015,24 @@
0, 2);
break;
}
+ case kArm64Float64Max:
+ // (b < a) ? a : b
+ __ Fcmp(i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), lo);
+ break;
+ case kArm64Float64Min:
+ // (a < b) ? a : b
+ __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), lo);
+ break;
+ case kArm64Float64Abs:
+ __ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kArm64Float64Neg:
+ __ Fneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64Float64Sqrt:
__ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
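
Note on the four Max/Min cases above: the Fcmp/Fcsel pair is a plain select, and because an unordered compare fails the lo condition, a NaN on either side selects the second operand. The emitted semantics, in scalar form (a sketch, not library code):

  // What kArm64Float64Max / kArm64Float64Min compute above.
  double EmittedMax(double a, double b) { return b < a ? a : b; }  // Fcmp(b, a); Fcsel(.., a, b, lo)
  double EmittedMin(double a, double b) { return a < b ? a : b; }  // Fcmp(a, b); Fcsel(.., a, b, lo)
  // A NaN operand makes both comparisons false, so b is returned either way.
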
@@ -632,12 +1048,101 @@
case kArm64Float64ToUint32:
__ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
break;
+ case kArm64Float32ToInt64:
+ __ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0));
+ if (i.OutputCount() > 1) {
+ __ Mov(i.OutputRegister(1), 1);
+ Label done;
+ __ Cmp(i.OutputRegister(0), 1);
+ __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
+ __ Fccmp(i.InputFloat32Register(0), i.InputFloat32Register(0), VFlag,
+ vc);
+ __ B(vc, &done);
+ __ Fcmp(i.InputFloat32Register(0), static_cast<float>(INT64_MIN));
+ __ Cset(i.OutputRegister(1), eq);
+ __ Bind(&done);
+ }
+ break;
+ case kArm64Float64ToInt64:
+ __ Fcvtzs(i.OutputRegister(0), i.InputDoubleRegister(0));
+ if (i.OutputCount() > 1) {
+ __ Mov(i.OutputRegister(1), 1);
+ Label done;
+ __ Cmp(i.OutputRegister(0), 1);
+ __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
+ __ Fccmp(i.InputDoubleRegister(0), i.InputDoubleRegister(0), VFlag, vc);
+ __ B(vc, &done);
+ __ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT64_MIN));
+ __ Cset(i.OutputRegister(1), eq);
+ __ Bind(&done);
+ }
+ break;
+ case kArm64Float32ToUint64:
+ __ Fcvtzu(i.OutputRegister64(), i.InputFloat32Register(0));
+ if (i.OutputCount() > 1) {
+ __ Fcmp(i.InputFloat32Register(0), -1.0);
+ __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
+ __ Cset(i.OutputRegister(1), ne);
+ }
+ break;
+ case kArm64Float64ToUint64:
+ __ Fcvtzu(i.OutputRegister64(), i.InputDoubleRegister(0));
+ if (i.OutputCount() > 1) {
+ __ Fcmp(i.InputDoubleRegister(0), -1.0);
+ __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
+ __ Cset(i.OutputRegister(1), ne);
+ }
+ break;
case kArm64Int32ToFloat64:
__ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
+ case kArm64Int64ToFloat32:
+ __ Scvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
+ break;
+ case kArm64Int64ToFloat64:
+ __ Scvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
+ break;
case kArm64Uint32ToFloat64:
__ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
+ case kArm64Uint64ToFloat32:
+ __ Ucvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
+ break;
+ case kArm64Uint64ToFloat64:
+ __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
+ break;
+ case kArm64Float64ExtractLowWord32:
+ __ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
+ break;
+ case kArm64Float64ExtractHighWord32:
+ // TODO(arm64): This should use MOV (to general) when NEON is supported.
+ __ Fmov(i.OutputRegister(), i.InputFloat64Register(0));
+ __ Lsr(i.OutputRegister(), i.OutputRegister(), 32);
+ break;
+ case kArm64Float64InsertLowWord32: {
+ // TODO(arm64): This should use MOV (from general) when NEON is supported.
+ UseScratchRegisterScope scope(masm());
+ Register tmp = scope.AcquireX();
+ __ Fmov(tmp, i.InputFloat64Register(0));
+ __ Bfi(tmp, i.InputRegister(1), 0, 32);
+ __ Fmov(i.OutputFloat64Register(), tmp);
+ break;
+ }
+ case kArm64Float64InsertHighWord32: {
+ // TODO(arm64): This should use MOV (from general) when NEON is supported.
+ UseScratchRegisterScope scope(masm());
+ Register tmp = scope.AcquireX();
+ __ Fmov(tmp.W(), i.InputFloat32Register(0));
+ __ Bfi(tmp, i.InputRegister(1), 32, 32);
+ __ Fmov(i.OutputFloat64Register(), tmp);
+ break;
+ }
+ case kArm64Float64MoveU64:
+ __ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
+ break;
+ case kArm64U64MoveFloat64:
+ __ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64Ldrb:
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
break;
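
Note: the Float64Insert{Low,High}Word32 cases above route the double through a general register with Fmov and patch in the new word with Bfi, since lane moves await NEON support (per the TODOs). Bit for bit, the high-word variant computes the following (a standalone sketch):

  #include <cstdint>
  #include <cstring>

  // Keep the low 32 bits of d; replace the high 32 bits with hi.
  double InsertHighWord32(double d, uint32_t hi) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    bits = (bits & 0xFFFFFFFFu) | (static_cast<uint64_t>(hi) << 32);
    double result;
    std::memcpy(&result, &bits, sizeof result);
    return result;
  }
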
@@ -680,29 +1185,6 @@
case kArm64StrD:
__ Str(i.InputDoubleRegister(2), i.MemoryOperand());
break;
- case kArm64StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ Add(index, object, Operand(index, SXTW));
- __ Str(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- // TODO(dcarney): we shouldn't test write barriers from c calls.
- LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
- UseScratchRegisterScope scope(masm());
- Register temp = no_reg;
- if (csp.is(masm()->StackPointer())) {
- temp = scope.AcquireX();
- lr_status = kLRHasBeenSaved;
- __ Push(lr, temp); // Need to push a pair
- }
- __ RecordWrite(object, index, value, lr_status, mode);
- if (csp.is(masm()->StackPointer())) {
- __ Pop(temp, lr);
- }
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
break;
@@ -718,6 +1200,9 @@
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
break;
+ case kCheckedLoadWord64:
+ ASSEMBLE_CHECKED_LOAD_INTEGER_64(Ldr);
+ break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(32);
break;
@@ -733,6 +1218,9 @@
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(Str);
break;
+ case kCheckedStoreWord64:
+ ASSEMBLE_CHECKED_STORE_INTEGER_64(Str);
+ break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(32);
break;
@@ -740,7 +1228,7 @@
ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
}
-}
+} // NOLINT(readability/fn_size)
// Assemble branches after this instruction.
@@ -785,71 +1273,14 @@
UNREACHABLE();
}
} else {
- switch (condition) {
- case kUnorderedEqual:
- // The "eq" condition will not catch the unordered case.
- // The jump/fall through to false label will be used if the comparison
- // was unordered.
- case kEqual:
- __ B(eq, tlabel);
- break;
- case kUnorderedNotEqual:
- // Unordered or not equal can be tested with "ne" condtion.
- // See ARMv8 manual C1.2.3 - Condition Code.
- case kNotEqual:
- __ B(ne, tlabel);
- break;
- case kSignedLessThan:
- __ B(lt, tlabel);
- break;
- case kSignedGreaterThanOrEqual:
- __ B(ge, tlabel);
- break;
- case kSignedLessThanOrEqual:
- __ B(le, tlabel);
- break;
- case kSignedGreaterThan:
- __ B(gt, tlabel);
- break;
- case kUnorderedLessThan:
- // The "lo" condition will not catch the unordered case.
- // The jump/fall through to false label will be used if the comparison
- // was unordered.
- case kUnsignedLessThan:
- __ B(lo, tlabel);
- break;
- case kUnorderedGreaterThanOrEqual:
- // Unordered, greater than or equal can be tested with "hs" condtion.
- // See ARMv8 manual C1.2.3 - Condition Code.
- case kUnsignedGreaterThanOrEqual:
- __ B(hs, tlabel);
- break;
- case kUnorderedLessThanOrEqual:
- // The "ls" condition will not catch the unordered case.
- // The jump/fall through to false label will be used if the comparison
- // was unordered.
- case kUnsignedLessThanOrEqual:
- __ B(ls, tlabel);
- break;
- case kUnorderedGreaterThan:
- // Unordered or greater than can be tested with "hi" condtion.
- // See ARMv8 manual C1.2.3 - Condition Code.
- case kUnsignedGreaterThan:
- __ B(hi, tlabel);
- break;
- case kOverflow:
- __ B(vs, tlabel);
- break;
- case kNotOverflow:
- __ B(vc, tlabel);
- break;
- }
+ Condition cc = FlagsConditionToCondition(condition);
+ __ B(cc, tlabel);
}
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
}
@@ -858,165 +1289,178 @@
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
Arm64OperandConverter i(this, instr);
- Label done;
// Materialize a full 64-bit 1 or 0 value. The result register is always the
// last output of the instruction.
- Label check;
- DCHECK_NE(0, instr->OutputCount());
+ DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
- Condition cc = nv;
- switch (condition) {
- case kUnorderedEqual:
- __ B(vc, &check);
- __ Mov(reg, 0);
- __ B(&done);
- // Fall through.
- case kEqual:
- cc = eq;
- break;
- case kUnorderedNotEqual:
- __ B(vc, &check);
- __ Mov(reg, 1);
- __ B(&done);
- // Fall through.
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnorderedLessThan:
- __ B(vc, &check);
- __ Mov(reg, 0);
- __ B(&done);
- // Fall through.
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnorderedGreaterThanOrEqual:
- __ B(vc, &check);
- __ Mov(reg, 1);
- __ B(&done);
- // Fall through.
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnorderedLessThanOrEqual:
- __ B(vc, &check);
- __ Mov(reg, 0);
- __ B(&done);
- // Fall through.
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnorderedGreaterThan:
- __ B(vc, &check);
- __ Mov(reg, 1);
- __ B(&done);
- // Fall through.
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- case kOverflow:
- cc = vs;
- break;
- case kNotOverflow:
- cc = vc;
- break;
- }
- __ Bind(&check);
+ Condition cc = FlagsConditionToCondition(condition);
__ Cset(reg, cc);
- __ Bind(&done);
}
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ Arm64OperandConverter i(this, instr);
+ Register input = i.InputRegister32(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ Cmp(input, i.InputInt32(index + 0));
+ __ B(eq, GetLabel(i.InputRpo(index + 1)));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ Arm64OperandConverter i(this, instr);
+ UseScratchRegisterScope scope(masm());
+ Register input = i.InputRegister32(0);
+ Register temp = scope.AcquireX();
+ size_t const case_count = instr->InputCount() - 2;
+ Label table;
+ __ Cmp(input, case_count);
+ __ B(hs, GetLabel(i.InputRpo(1)));
+ __ Adr(temp, &table);
+ __ Add(temp, temp, Operand(input, UXTW, 2));
+ __ Br(temp);
+ __ StartBlockPools();
+ __ Bind(&table);
+ for (size_t index = 0; index < case_count; ++index) {
+ __ B(GetLabel(i.InputRpo(index + 2)));
+ }
+ __ EndBlockPools();
+}
+
+
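Note on the jump-table layout in AssembleArchTableSwitch above: every case contributes exactly one unconditional B, i.e. a fixed 4-byte table entry, which is why the target is computed as table + input * 4 via Operand(input, UXTW, 2); the StartBlockPools/EndBlockPools bracket keeps the assembler from splicing a constant pool into the middle of the table.
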
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
-// TODO(dcarney): increase stack slots in frame once before first use.
-static int AlignedStackSlots(int stack_slots) {
- if (stack_slots & 1) stack_slots++;
- return stack_slots;
-}
-
-
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ SetStackPointer(csp);
__ Push(lr, fp);
__ Mov(fp, csp);
- // TODO(dcarney): correct callee saved registers.
- __ PushCalleeSavedRegisters();
- frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
__ SetStackPointer(jssp);
- __ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
- __ SetStackPointer(jssp);
- __ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
- }
- int stack_slots = frame()->GetSpillSlotCount();
- if (stack_slots > 0) {
- Register sp = __ StackPointer();
- if (!sp.Is(csp)) {
- __ Sub(sp, sp, stack_slots * kPointerSize);
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
+ if (descriptor->UseNativeStack()) {
+ __ SetStackPointer(csp);
+ } else {
+ __ SetStackPointer(jssp);
}
- __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
+ __ StubPrologue();
+ } else {
+ if (descriptor->UseNativeStack()) {
+ __ SetStackPointer(csp);
+ } else {
+ __ SetStackPointer(jssp);
+ }
+ frame()->SetElidedFrameSizeInSlots(0);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ // If frame()->needs_frame() is false, then
+ // frame()->AlignSavedCalleeRegisterSlots() is guaranteed to return 0.
+ if (csp.Is(masm()->StackPointer()) && frame()->needs_frame()) {
+ // The system stack pointer requires 16-byte alignment at function call
+ // boundaries.
+
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+ }
+ __ Claim(stack_shrink_slots);
+
+ // Save FP registers.
+ CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ descriptor->CalleeSavedFPRegisters());
+ int saved_count = saves_fp.Count();
+ if (saved_count != 0) {
+ DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
+ __ PushCPURegList(saves_fp);
+ frame()->AllocateSavedCalleeRegisterSlots(saved_count *
+ (kDoubleSize / kPointerSize));
+ }
+ // Save registers.
+ // TODO(palfia): TF save list is not in sync with
+ // CPURegList::GetCalleeSaved(): x30 is missing.
+ // DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
+ CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
+ descriptor->CalleeSavedRegisters());
+ saved_count = saves.Count();
+ if (saved_count != 0) {
+ __ PushCPURegList(saves);
+ frame()->AllocateSavedCalleeRegisterSlots(saved_count);
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
- if (stack_slots > 0) {
- __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
- }
- // Restore registers.
- // TODO(dcarney): correct callee saved registers.
- __ PopCalleeSavedRegisters();
- }
+
+ // Restore registers.
+ CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
+ descriptor->CalleeSavedRegisters());
+ if (saves.Count() != 0) {
+ __ PopCPURegList(saves);
+ }
+
+ // Restore fp registers.
+ CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ descriptor->CalleeSavedFPRegisters());
+ if (saves_fp.Count() != 0) {
+ __ PopCPURegList(saves_fp);
+ }
+
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ if (descriptor->IsCFunctionCall()) {
__ Mov(csp, fp);
__ Pop(fp, lr);
- __ Ret();
- } else {
- __ Mov(jssp, fp);
- __ Pop(fp, lr);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ Drop(pop_count);
- __ Ret();
+ } else if (frame()->needs_frame()) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ B(&return_label_);
+ return;
+ } else {
+ __ Bind(&return_label_);
+ if (descriptor->UseNativeStack()) {
+ __ Mov(csp, fp);
+ } else {
+ __ Mov(jssp, fp);
+ }
+ __ Pop(fp, lr);
+ }
+ } else if (descriptor->UseNativeStack()) {
+ pop_count += (pop_count & 1);
}
+ __ Drop(pop_count);
+ __ Ret();
}
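
The `pop_count += (pop_count & 1)` adjustment above and the `AlignSavedCalleeRegisterSlots()` call in the prologue enforce the same invariant: when csp is the stack pointer it must stay 16-byte aligned across call boundaries, so 8-byte slot counts are rounded up to an even number. A minimal standalone sketch of that arithmetic (illustrative only, not part of this patch):

#include <cassert>

// Round an 8-byte slot count up to the next even number, keeping the
// system stack pointer 16-byte aligned.
int AlignSlots(int slots) { return slots + (slots & 1); }

int main() {
  assert(AlignSlots(3) == 4);  // odd counts gain one padding slot
  assert(AlignSlots(4) == 4);  // even counts are already aligned
  return 0;
}
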
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- Arm64OperandConverter g(this, NULL);
+ Arm64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1045,7 +1489,16 @@
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: scope.AcquireX();
if (src.type() == Constant::kHeapObject) {
- __ LoadObject(dst, src.ToHeapObject());
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ int offset;
+ if (IsMaterializableFromFrame(src_object, &offset)) {
+ __ Ldr(dst, MemOperand(fp, offset));
+ } else if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ LoadObject(dst, src_object);
+ }
} else {
__ Mov(dst, g.ToImmediate(source));
}
@@ -1104,7 +1557,7 @@
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- Arm64OperandConverter g(this, NULL);
+ Arm64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1157,29 +1610,36 @@
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 64-bit ARM we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- intptr_t current_pc = masm()->pc_offset();
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ intptr_t current_pc = masm()->pc_offset();
- if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
- intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK((padding_size % kInstructionSize) == 0);
- InstructionAccurateScope instruction_accurate(
- masm(), padding_size / kInstructionSize);
+ if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+ intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK((padding_size % kInstructionSize) == 0);
+ InstructionAccurateScope instruction_accurate(
+ masm(), padding_size / kInstructionSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= kInstructionSize;
- }
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= kInstructionSize;
}
}
- MarkLazyDeoptSite();
}
#undef __
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
index 863451f..ef33348 100644
--- a/src/compiler/arm64/instruction-codes-arm64.h
+++ b/src/compiler/arm64/instruction-codes-arm64.h
@@ -18,6 +18,8 @@
V(Arm64And32) \
V(Arm64Bic) \
V(Arm64Bic32) \
+ V(Arm64Clz) \
+ V(Arm64Clz32) \
V(Arm64Cmp) \
V(Arm64Cmp32) \
V(Arm64Cmn) \
@@ -54,8 +56,6 @@
V(Arm64Umod32) \
V(Arm64Not) \
V(Arm64Not32) \
- V(Arm64Neg) \
- V(Arm64Neg32) \
V(Arm64Lsl) \
V(Arm64Lsl32) \
V(Arm64Lsr) \
@@ -68,32 +68,66 @@
V(Arm64Sxtb32) \
V(Arm64Sxth32) \
V(Arm64Sxtw) \
+ V(Arm64Sbfx32) \
V(Arm64Ubfx) \
V(Arm64Ubfx32) \
+ V(Arm64Ubfiz32) \
+ V(Arm64Bfi) \
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
- V(Arm64Claim) \
+ V(Arm64ClaimForCallArguments) \
V(Arm64Poke) \
- V(Arm64PokePairZero) \
V(Arm64PokePair) \
+ V(Arm64Float32Cmp) \
+ V(Arm64Float32Add) \
+ V(Arm64Float32Sub) \
+ V(Arm64Float32Mul) \
+ V(Arm64Float32Div) \
+ V(Arm64Float32Max) \
+ V(Arm64Float32Min) \
+ V(Arm64Float32Abs) \
+ V(Arm64Float32Sqrt) \
+ V(Arm64Float32RoundDown) \
V(Arm64Float64Cmp) \
V(Arm64Float64Add) \
V(Arm64Float64Sub) \
V(Arm64Float64Mul) \
V(Arm64Float64Div) \
V(Arm64Float64Mod) \
+ V(Arm64Float64Max) \
+ V(Arm64Float64Min) \
+ V(Arm64Float64Abs) \
+ V(Arm64Float64Neg) \
V(Arm64Float64Sqrt) \
- V(Arm64Float64Floor) \
- V(Arm64Float64Ceil) \
- V(Arm64Float64RoundTruncate) \
+ V(Arm64Float64RoundDown) \
+ V(Arm64Float32RoundUp) \
+ V(Arm64Float64RoundUp) \
V(Arm64Float64RoundTiesAway) \
+ V(Arm64Float32RoundTruncate) \
+ V(Arm64Float64RoundTruncate) \
+ V(Arm64Float32RoundTiesEven) \
+ V(Arm64Float64RoundTiesEven) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float64ToInt32) \
V(Arm64Float64ToUint32) \
+ V(Arm64Float32ToInt64) \
+ V(Arm64Float64ToInt64) \
+ V(Arm64Float32ToUint64) \
+ V(Arm64Float64ToUint64) \
V(Arm64Int32ToFloat64) \
+ V(Arm64Int64ToFloat32) \
+ V(Arm64Int64ToFloat64) \
V(Arm64Uint32ToFloat64) \
+ V(Arm64Uint64ToFloat32) \
+ V(Arm64Uint64ToFloat64) \
+ V(Arm64Float64ExtractLowWord32) \
+ V(Arm64Float64ExtractHighWord32) \
+ V(Arm64Float64InsertLowWord32) \
+ V(Arm64Float64InsertHighWord32) \
+ V(Arm64Float64MoveU64) \
+ V(Arm64U64MoveFloat64) \
V(Arm64LdrS) \
V(Arm64StrS) \
V(Arm64LdrD) \
@@ -107,8 +141,7 @@
V(Arm64LdrW) \
V(Arm64StrW) \
V(Arm64Ldr) \
- V(Arm64Str) \
- V(Arm64StoreWriteBarrier)
+ V(Arm64Str)
// Addressing modes represent the "shape" of inputs to an instruction.
@@ -124,16 +157,20 @@
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
-#define TARGET_ADDRESSING_MODE_LIST(V) \
- V(MRI) /* [%r0 + K] */ \
- V(MRR) /* [%r0 + %r1] */ \
- V(Operand2_R_LSL_I) /* %r0 LSL K */ \
- V(Operand2_R_LSR_I) /* %r0 LSR K */ \
- V(Operand2_R_ASR_I) /* %r0 ASR K */ \
- V(Operand2_R_ROR_I) /* %r0 ROR K */
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */ \
+ V(Operand2_R_LSL_I) /* %r0 LSL K */ \
+ V(Operand2_R_LSR_I) /* %r0 LSR K */ \
+ V(Operand2_R_ASR_I) /* %r0 ASR K */ \
+ V(Operand2_R_ROR_I) /* %r0 ROR K */ \
+ V(Operand2_R_UXTB) /* %r0 UXTB (unsigned extend byte) */ \
+ V(Operand2_R_UXTH) /* %r0 UXTH (unsigned extend halfword) */ \
+ V(Operand2_R_SXTB) /* %r0 SXTB (signed extend byte) */ \
+ V(Operand2_R_SXTH) /* %r0 SXTH (signed extend halfword) */
-} // namespace internal
} // namespace compiler
+} // namespace internal
} // namespace v8
#endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
diff --git a/src/compiler/arm64/instruction-scheduler-arm64.cc b/src/compiler/arm64/instruction-scheduler-arm64.cc
new file mode 100644
index 0000000..eb358dd
--- /dev/null
+++ b/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -0,0 +1,224 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kArm64Add:
+ case kArm64Add32:
+ case kArm64And:
+ case kArm64And32:
+ case kArm64Bic:
+ case kArm64Bic32:
+ case kArm64Clz:
+ case kArm64Clz32:
+ case kArm64Cmp:
+ case kArm64Cmp32:
+ case kArm64Cmn:
+ case kArm64Cmn32:
+ case kArm64Tst:
+ case kArm64Tst32:
+ case kArm64Or:
+ case kArm64Or32:
+ case kArm64Orn:
+ case kArm64Orn32:
+ case kArm64Eor:
+ case kArm64Eor32:
+ case kArm64Eon:
+ case kArm64Eon32:
+ case kArm64Sub:
+ case kArm64Sub32:
+ case kArm64Mul:
+ case kArm64Mul32:
+ case kArm64Smull:
+ case kArm64Umull:
+ case kArm64Madd:
+ case kArm64Madd32:
+ case kArm64Msub:
+ case kArm64Msub32:
+ case kArm64Mneg:
+ case kArm64Mneg32:
+ case kArm64Idiv:
+ case kArm64Idiv32:
+ case kArm64Udiv:
+ case kArm64Udiv32:
+ case kArm64Imod:
+ case kArm64Imod32:
+ case kArm64Umod:
+ case kArm64Umod32:
+ case kArm64Not:
+ case kArm64Not32:
+ case kArm64Lsl:
+ case kArm64Lsl32:
+ case kArm64Lsr:
+ case kArm64Lsr32:
+ case kArm64Asr:
+ case kArm64Asr32:
+ case kArm64Ror:
+ case kArm64Ror32:
+ case kArm64Mov32:
+ case kArm64Sxtb32:
+ case kArm64Sxth32:
+ case kArm64Sxtw:
+ case kArm64Sbfx32:
+ case kArm64Ubfx:
+ case kArm64Ubfx32:
+ case kArm64Ubfiz32:
+ case kArm64Bfi:
+ case kArm64Float32Cmp:
+ case kArm64Float32Add:
+ case kArm64Float32Sub:
+ case kArm64Float32Mul:
+ case kArm64Float32Div:
+ case kArm64Float32Max:
+ case kArm64Float32Min:
+ case kArm64Float32Abs:
+ case kArm64Float32Sqrt:
+ case kArm64Float32RoundDown:
+ case kArm64Float64Cmp:
+ case kArm64Float64Add:
+ case kArm64Float64Sub:
+ case kArm64Float64Mul:
+ case kArm64Float64Div:
+ case kArm64Float64Mod:
+ case kArm64Float64Max:
+ case kArm64Float64Min:
+ case kArm64Float64Abs:
+ case kArm64Float64Neg:
+ case kArm64Float64Sqrt:
+ case kArm64Float64RoundDown:
+ case kArm64Float64RoundTiesAway:
+ case kArm64Float64RoundTruncate:
+ case kArm64Float64RoundTiesEven:
+ case kArm64Float64RoundUp:
+ case kArm64Float32RoundTiesEven:
+ case kArm64Float32RoundTruncate:
+ case kArm64Float32RoundUp:
+ case kArm64Float32ToFloat64:
+ case kArm64Float64ToFloat32:
+ case kArm64Float64ToInt32:
+ case kArm64Float64ToUint32:
+ case kArm64Float32ToInt64:
+ case kArm64Float64ToInt64:
+ case kArm64Float32ToUint64:
+ case kArm64Float64ToUint64:
+ case kArm64Int32ToFloat64:
+ case kArm64Int64ToFloat32:
+ case kArm64Int64ToFloat64:
+ case kArm64Uint32ToFloat64:
+ case kArm64Uint64ToFloat32:
+ case kArm64Uint64ToFloat64:
+ case kArm64Float64ExtractLowWord32:
+ case kArm64Float64ExtractHighWord32:
+ case kArm64Float64InsertLowWord32:
+ case kArm64Float64InsertHighWord32:
+ case kArm64Float64MoveU64:
+ case kArm64U64MoveFloat64:
+ return kNoOpcodeFlags;
+
+ case kArm64TestAndBranch32:
+ case kArm64TestAndBranch:
+ case kArm64CompareAndBranch32:
+ return kIsBlockTerminator;
+
+ case kArm64LdrS:
+ case kArm64LdrD:
+ case kArm64Ldrb:
+ case kArm64Ldrsb:
+ case kArm64Ldrh:
+ case kArm64Ldrsh:
+ case kArm64LdrW:
+ case kArm64Ldr:
+ return kIsLoadOperation;
+
+ case kArm64ClaimForCallArguments:
+ case kArm64Poke:
+ case kArm64PokePair:
+ case kArm64StrS:
+ case kArm64StrD:
+ case kArm64Strb:
+ case kArm64Strh:
+ case kArm64StrW:
+ case kArm64Str:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
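
To make the flag classes above concrete, here is a small model of how a scheduler can consume them when asking whether two instructions may be reordered. The flag values and the CanReorder helper are assumptions for illustration, not V8's actual scheduler API:

#include <cassert>

// Hypothetical flag values; the real constants live in instruction-scheduler.h.
const int kIsBlockTerminator = 1;
const int kHasSideEffect = 2;
const int kIsLoadOperation = 4;

// May an instruction carrying `second` be moved above one carrying `first`?
bool CanReorder(int first, int second) {
  if ((first | second) & kIsBlockTerminator) return false;  // terminators stay put
  bool first_writes = (first & kHasSideEffect) != 0;
  bool second_writes = (second & kHasSideEffect) != 0;
  // Loads may pass other loads, but nothing passes a side effect it could
  // observe, and a side effect passes neither loads nor other side effects.
  if (first_writes && (second_writes || (second & kIsLoadOperation))) return false;
  if (second_writes && (first & kIsLoadOperation)) return false;
  return true;
}

int main() {
  assert(CanReorder(kIsLoadOperation, kIsLoadOperation));  // ldr / ldr: reorderable
  assert(!CanReorder(kHasSideEffect, kIsLoadOperation));   // str then ldr: keep order
  return 0;
}
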
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+  // Basic latency modeling for arm64 instructions. The latencies below were
+  // determined empirically.
+ switch (instr->arch_opcode()) {
+ case kArm64Float32ToFloat64:
+ case kArm64Float64ToFloat32:
+ case kArm64Float64ToInt32:
+ case kArm64Float64ToUint32:
+ case kArm64Int32ToFloat64:
+ case kArm64Uint32ToFloat64:
+ return 3;
+
+ case kArm64Float64Add:
+ case kArm64Float64Sub:
+ return 2;
+
+ case kArm64Float64Mul:
+ return 3;
+
+ case kArm64Float64Div:
+ return 6;
+
+ case kArm64Lsl:
+ case kArm64Lsl32:
+ case kArm64Lsr:
+ case kArm64Lsr32:
+ case kArm64Asr:
+ case kArm64Asr32:
+ case kArm64Ror:
+ case kArm64Ror32:
+ return 3;
+
+ case kCheckedLoadInt8:
+ case kCheckedLoadUint8:
+ case kCheckedLoadInt16:
+ case kCheckedLoadUint16:
+ case kCheckedLoadWord32:
+ case kCheckedLoadWord64:
+ case kCheckedLoadFloat32:
+ case kCheckedLoadFloat64:
+ case kArm64LdrS:
+ case kArm64LdrD:
+ case kArm64Ldrb:
+ case kArm64Ldrsb:
+ case kArm64Ldrh:
+ case kArm64Ldrsh:
+ case kArm64LdrW:
+ case kArm64Ldr:
+ return 5;
+
+ default:
+ return 1;
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 72661af..1ec5ab4 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -4,6 +4,7 @@
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -24,27 +25,52 @@
// Adds Arm64-specific methods for generating operands.
-class Arm64OperandGenerator FINAL : public OperandGenerator {
+class Arm64OperandGenerator final : public OperandGenerator {
public:
explicit Arm64OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand* UseOperand(Node* node, ImmediateMode mode) {
+ InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
if (CanBeImmediate(node, mode)) {
return UseImmediate(node);
}
return UseRegister(node);
}
+ // Use the zero register if the node has the immediate value zero, otherwise
+ // assign a register.
+ InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+ if (IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ // Use the provided node if it has the required value, or create a
+ // TempImmediate otherwise.
+ InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
+ if (GetIntegerConstantValue(node) == value) {
+ return UseImmediate(node);
+ }
+ return TempImmediate(value);
+ }
+
+ bool IsIntegerConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kInt32Constant) ||
+ (node->opcode() == IrOpcode::kInt64Constant);
+ }
+
+ int64_t GetIntegerConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ return OpParameter<int32_t>(node);
+ }
+ DCHECK(node->opcode() == IrOpcode::kInt64Constant);
+ return OpParameter<int64_t>(node);
+ }
+
bool CanBeImmediate(Node* node, ImmediateMode mode) {
- int64_t value;
- if (node->opcode() == IrOpcode::kInt32Constant)
- value = OpParameter<int32_t>(node);
- else if (node->opcode() == IrOpcode::kInt64Constant)
- value = OpParameter<int64_t>(node);
- else
- return false;
- return CanBeImmediate(value, mode);
+ return IsIntegerConstant(node) &&
+ CanBeImmediate(GetIntegerConstantValue(node), mode);
}
bool CanBeImmediate(int64_t value, ImmediateMode mode) {
@@ -60,10 +86,6 @@
&ignored, &ignored, &ignored);
case kArithmeticImm:
return Assembler::IsImmAddSub(value);
- case kShift32Imm:
- return 0 <= value && value < 32;
- case kShift64Imm:
- return 0 <= value && value < 64;
case kLoadStoreImm8:
return IsLoadStoreImmediate(value, LSByte);
case kLoadStoreImm16:
@@ -74,6 +96,12 @@
return IsLoadStoreImmediate(value, LSDoubleWord);
case kNoImmediate:
return false;
+ case kShift32Imm: // Fall through.
+ case kShift64Imm:
+ // Shift operations only observe the bottom 5 or 6 bits of the value.
+ // All possible shifts can be encoded by discarding bits which have no
+ // effect.
+ return true;
}
return false;
}
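
The kShift32Imm/kShift64Imm cases above rely on ARM64 shift instructions reducing the amount modulo the register width, so every integer constant has a valid encoding once the dead bits are dropped. A standalone sketch (illustrative only):

#include <cassert>
#include <cstdint>

// The hardware reduces the shift amount modulo the register width, so any
// constant is encodable once the bits with no effect are discarded.
int EncodedShift(int64_t value, int register_bits) {
  return static_cast<int>(value & (register_bits - 1));  // & 0x1f or & 0x3f
}

int main() {
  assert(EncodedShift(33, 32) == 1);  // w-register shift: 33 behaves as 1
  assert(EncodedShift(65, 64) == 1);  // x-register shift: 65 behaves as 1
  return 0;
}
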
@@ -86,16 +114,16 @@
};
-static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
+namespace {
+
+void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Arm64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
-static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
+void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Arm64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
@@ -103,17 +131,8 @@
}
-static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
- Arm64OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
-}
-
-
-static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
- Node* node, ImmediateMode operand_mode) {
+void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
+ ImmediateMode operand_mode) {
Arm64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
@@ -121,87 +140,135 @@
}
-template <typename Matcher>
-static bool TryMatchShift(InstructionSelector* selector, Node* node,
- InstructionCode* opcode, IrOpcode::Value shift_opcode,
- ImmediateMode imm_mode,
- AddressingMode addressing_mode) {
- if (node->opcode() != shift_opcode) return false;
+bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
+ Node* input_node, InstructionCode* opcode, bool try_ror) {
Arm64OperandGenerator g(selector);
- Matcher m(node);
- if (g.CanBeImmediate(m.right().node(), imm_mode)) {
- *opcode |= AddressingModeField::encode(addressing_mode);
- return true;
+
+ if (!selector->CanCover(node, input_node)) return false;
+ if (input_node->InputCount() != 2) return false;
+ if (!g.IsIntegerConstant(input_node->InputAt(1))) return false;
+
+ switch (input_node->opcode()) {
+ case IrOpcode::kWord32Shl:
+ case IrOpcode::kWord64Shl:
+ *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
+ return true;
+ case IrOpcode::kWord32Shr:
+ case IrOpcode::kWord64Shr:
+ *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
+ return true;
+ case IrOpcode::kWord32Sar:
+ case IrOpcode::kWord64Sar:
+ *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+ return true;
+ case IrOpcode::kWord32Ror:
+ case IrOpcode::kWord64Ror:
+ if (try_ror) {
+ *opcode |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
+ return true;
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
+
+bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
+ Node* node, Node* left_node, Node* right_node,
+ InstructionOperand* left_op,
+ InstructionOperand* right_op, InstructionCode* opcode) {
+ if (!selector->CanCover(node, right_node)) return false;
+
+ NodeMatcher nm(right_node);
+
+ if (nm.IsWord32And()) {
+ Int32BinopMatcher mright(right_node);
+ if (mright.right().Is(0xff) || mright.right().Is(0xffff)) {
+ int32_t mask = mright.right().Value();
+ *left_op = g->UseRegister(left_node);
+ *right_op = g->UseRegister(mright.left().node());
+ *opcode |= AddressingModeField::encode(
+ (mask == 0xff) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
+ return true;
+ }
+ } else if (nm.IsWord32Sar()) {
+ Int32BinopMatcher mright(right_node);
+ if (selector->CanCover(mright.node(), mright.left().node()) &&
+ mright.left().IsWord32Shl()) {
+ Int32BinopMatcher mleft_of_right(mright.left().node());
+ if ((mright.right().Is(16) && mleft_of_right.right().Is(16)) ||
+ (mright.right().Is(24) && mleft_of_right.right().Is(24))) {
+ int32_t shift = mright.right().Value();
+ *left_op = g->UseRegister(left_node);
+ *right_op = g->UseRegister(mleft_of_right.left().node());
+ *opcode |= AddressingModeField::encode(
+ (shift == 24) ? kMode_Operand2_R_SXTB : kMode_Operand2_R_SXTH);
+ return true;
+ }
+ }
}
return false;
}
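
TryMatchAnyExtend folds common zero- and sign-extension patterns into the operand of an add/sub. A self-contained sketch of the patterns it recognizes; the enum and helpers are illustrative assumptions, not V8 code:

#include <cassert>
#include <cstdint>

enum ExtendMode { kNoExtend, kUXTB, kUXTH, kSXTB, kSXTH };

// And(x, 0xff) / And(x, 0xffff) become unsigned extends; the signed forms
// correspond to Sar(Shl(x, 24), 24) and Sar(Shl(x, 16), 16).
ExtendMode ExtendForMask(uint32_t mask) {
  if (mask == 0xff) return kUXTB;
  if (mask == 0xffff) return kUXTH;
  return kNoExtend;
}

ExtendMode ExtendForShlSarAmount(int shift) {
  if (shift == 24) return kSXTB;
  if (shift == 16) return kSXTH;
  return kNoExtend;
}

int main() {
  // a + (b & 0xff)         ==>  add w0, w_a, w_b, uxtb
  assert(ExtendForMask(0xff) == kUXTB);
  // a + ((b << 16) >> 16)  ==>  add w0, w_a, w_b, sxth
  assert(ExtendForShlSarAmount(16) == kSXTH);
  assert(ExtendForMask(0xf0) == kNoExtend);  // not a plain byte/halfword mask
  return 0;
}
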
-static bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
- InstructionCode* opcode, bool try_ror) {
- return TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
- IrOpcode::kWord32Shl, kShift32Imm,
- kMode_Operand2_R_LSL_I) ||
- TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
- IrOpcode::kWord32Shr, kShift32Imm,
- kMode_Operand2_R_LSR_I) ||
- TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
- IrOpcode::kWord32Sar, kShift32Imm,
- kMode_Operand2_R_ASR_I) ||
- (try_ror && TryMatchShift<Int32BinopMatcher>(
- selector, node, opcode, IrOpcode::kWord32Ror,
- kShift32Imm, kMode_Operand2_R_ROR_I)) ||
- TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
- IrOpcode::kWord64Shl, kShift64Imm,
- kMode_Operand2_R_LSL_I) ||
- TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
- IrOpcode::kWord64Shr, kShift64Imm,
- kMode_Operand2_R_LSR_I) ||
- TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
- IrOpcode::kWord64Sar, kShift64Imm,
- kMode_Operand2_R_ASR_I) ||
- (try_ror && TryMatchShift<Int64BinopMatcher>(
- selector, node, opcode, IrOpcode::kWord64Ror,
- kShift64Imm, kMode_Operand2_R_ROR_I));
-}
-
-
// Shared routine for multiple binary operations.
template <typename Matcher>
-static void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode, ImmediateMode operand_mode,
- FlagsContinuation* cont) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, ImmediateMode operand_mode,
+ FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
Matcher m(node);
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[5];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
- bool try_ror_operand = true;
+ bool is_cmp = (opcode == kArm64Cmp32) || (opcode == kArm64Cmn32);
- if (m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() || m.IsInt64Sub()) {
- try_ror_operand = false;
- }
+ // We can commute cmp by switching the inputs and commuting the flags
+ // continuation.
+ bool can_commute = m.HasProperty(Operator::kCommutative) || is_cmp;
- if (g.CanBeImmediate(m.right().node(), operand_mode)) {
- inputs[input_count++] = g.UseRegister(m.left().node());
- inputs[input_count++] = g.UseImmediate(m.right().node());
- } else if (TryMatchAnyShift(selector, m.right().node(), &opcode,
- try_ror_operand)) {
- Matcher m_shift(m.right().node());
- inputs[input_count++] = g.UseRegister(m.left().node());
+ // The cmp and cmn instructions are encoded as sub or add with zero output
+ // register, and therefore support the same operand modes.
+ bool is_add_sub = m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() ||
+ m.IsInt64Sub() || is_cmp;
+
+ Node* left_node = m.left().node();
+ Node* right_node = m.right().node();
+
+ if (g.CanBeImmediate(right_node, operand_mode)) {
+ inputs[input_count++] = g.UseRegister(left_node);
+ inputs[input_count++] = g.UseImmediate(right_node);
+ } else if (is_cmp && g.CanBeImmediate(left_node, operand_mode)) {
+ cont->Commute();
+ inputs[input_count++] = g.UseRegister(right_node);
+ inputs[input_count++] = g.UseImmediate(left_node);
+ } else if (is_add_sub &&
+ TryMatchAnyExtend(&g, selector, node, left_node, right_node,
+ &inputs[0], &inputs[1], &opcode)) {
+ input_count += 2;
+ } else if (is_add_sub && can_commute &&
+ TryMatchAnyExtend(&g, selector, node, right_node, left_node,
+ &inputs[0], &inputs[1], &opcode)) {
+ if (is_cmp) cont->Commute();
+ input_count += 2;
+ } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
+ !is_add_sub)) {
+ Matcher m_shift(right_node);
+ inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
- } else if (m.HasProperty(Operator::kCommutative) &&
- TryMatchAnyShift(selector, m.left().node(), &opcode,
- try_ror_operand)) {
- Matcher m_shift(m.left().node());
- inputs[input_count++] = g.UseRegister(m.right().node());
+ } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
+ !is_add_sub)) {
+ if (is_cmp) cont->Commute();
+ Matcher m_shift(left_node);
+ inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
} else {
- inputs[input_count++] = g.UseRegister(m.left().node());
- inputs[input_count++] = g.UseRegister(m.right().node());
+ inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
+ inputs[input_count++] = g.UseRegister(right_node);
}
if (cont->IsBranch()) {
@@ -209,83 +276,102 @@
inputs[input_count++] = g.Label(cont->false_block());
}
- outputs[output_count++] = g.DefineAsRegister(node);
+ if (!is_cmp) {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
+
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK((output_count != 0) || is_cmp);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
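
When the inputs of a cmp/cmn are swapped above, the flags continuation is commuted as well: mirroring a comparison flips the condition to its swapped form, not its negation. A minimal sketch with a hypothetical condition enum (V8's FlagsContinuation::Commute() performs the equivalent mapping over its full condition set):

#include <cassert>

enum Condition { kLessThan, kGreaterThan, kEqual };

// Swapping cmp operands mirrors the condition (a < b  <=>  b > a);
// it does not negate it.
Condition CommuteCondition(Condition cond) {
  switch (cond) {
    case kLessThan:
      return kGreaterThan;
    case kGreaterThan:
      return kLessThan;
    case kEqual:
      return kEqual;  // symmetric conditions are unchanged
  }
  return cond;
}

int main() {
  assert(CommuteCondition(kLessThan) == kGreaterThan);
  assert(CommuteCondition(kEqual) == kEqual);
  return 0;
}
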
// Shared routine for multiple binary operations.
template <typename Matcher>
-static void VisitBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode, ImmediateMode operand_mode) {
+void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
+ ImmediateMode operand_mode) {
FlagsContinuation cont;
VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}
template <typename Matcher>
-static void VisitAddSub(InstructionSelector* selector, Node* node,
- ArchOpcode opcode, ArchOpcode negate_opcode) {
+void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
+ ArchOpcode negate_opcode) {
Arm64OperandGenerator g(selector);
Matcher m(node);
if (m.right().HasValue() && (m.right().Value() < 0) &&
g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
selector->Emit(negate_opcode, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
- g.TempImmediate(-m.right().Value()));
+ g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
} else {
VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
}
}
+// For multiplications by an immediate of the form 2^k + 1, where k > 0,
+// return the value of k; otherwise return zero. This is used to reduce the
+// multiplication to an addition with left shift: x + (x << k).
+template <typename Matcher>
+int32_t LeftShiftForReducedMultiply(Matcher* m) {
+ DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
+ if (m->right().HasValue() && m->right().Value() >= 3) {
+ uint64_t value_minus_one = m->right().Value() - 1;
+ if (base::bits::IsPowerOfTwo64(value_minus_one)) {
+ return WhichPowerOf2_64(value_minus_one);
+ }
+ }
+ return 0;
+}
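
A worked instance of the reduction computed by LeftShiftForReducedMultiply, as standalone C++ (illustrative; WhichPowerOf2_64 is replaced by a plain loop):

#include <cassert>
#include <cstdint>

// Returns k when the multiplier has the form 2^k + 1 (k > 0), else 0,
// mirroring the reduction x * (2^k + 1) => x + (x << k).
int32_t ShiftForMultiplier(uint64_t c) {
  if (c < 3) return 0;
  uint64_t m = c - 1;                // candidate power of two
  if ((m & (m - 1)) != 0) return 0;  // not a power of two: no reduction
  int32_t k = 0;
  while ((m >>= 1) != 0) ++k;        // k = log2(c - 1)
  return k;
}

int main() {
  assert(ShiftForMultiplier(9) == 3);  // x * 9 => x + (x << 3)
  assert(ShiftForMultiplier(5) == 2);  // x * 5 => x + (x << 2)
  assert(ShiftForMultiplier(6) == 0);  // 6 - 1 is not a power of two
  return 0;
}
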
+
+} // namespace
+
+
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
ImmediateMode immediate_mode = kNoImmediate;
- switch (rep) {
- case kRepFloat32:
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kArm64LdrS;
immediate_mode = kLoadStoreImm32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kArm64LdrD;
immediate_mode = kLoadStoreImm64;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
immediate_mode = kLoadStoreImm8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
immediate_mode = kLoadStoreImm16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kArm64LdrW;
immediate_mode = kLoadStoreImm32;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -305,88 +391,114 @@
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
- Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
- g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
- temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
- ArchOpcode opcode;
- ImmediateMode immediate_mode = kNoImmediate;
- switch (rep) {
- case kRepFloat32:
- opcode = kArm64StrS;
- immediate_mode = kLoadStoreImm32;
- break;
- case kRepFloat64:
- opcode = kArm64StrD;
- immediate_mode = kLoadStoreImm64;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kArm64Strb;
- immediate_mode = kLoadStoreImm8;
- break;
- case kRepWord16:
- opcode = kArm64Strh;
- immediate_mode = kLoadStoreImm16;
- break;
- case kRepWord32:
- opcode = kArm64StrW;
- immediate_mode = kLoadStoreImm32;
- break;
- case kRepTagged: // Fall through.
- case kRepWord64:
- opcode = kArm64Str;
- immediate_mode = kLoadStoreImm64;
- break;
- default:
- UNREACHABLE();
- return;
- }
- if (g.CanBeImmediate(index, immediate_mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+  // TODO(arm64): This could probably be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
- g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ ArchOpcode opcode = kArchNop;
+ ImmediateMode immediate_mode = kNoImmediate;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kArm64StrS;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kArm64StrD;
+ immediate_mode = kLoadStoreImm64;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kArm64Strb;
+ immediate_mode = kLoadStoreImm8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kArm64Strh;
+ immediate_mode = kLoadStoreImm16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kArm64StrW;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kArm64Str;
+ immediate_mode = kLoadStoreImm64;
+ break;
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(index, immediate_mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ }
}
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kWord64:
+ opcode = kCheckedLoadWord64;
+ break;
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -396,34 +508,39 @@
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kWord64:
+ opcode = kCheckedStoreWord64;
+ break;
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- Emit(opcode, nullptr, g.UseRegister(buffer), g.UseRegister(offset),
+ Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
}
@@ -506,22 +623,25 @@
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
// The mask must be contiguous, and occupy the least-significant bits.
- DCHECK_EQ(0, base::bits::CountTrailingZeros32(mask));
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
// Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().IsInRange(0, 31)) {
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().Value() & 0x1f;
+
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
// still use ubfx with a smaller mask and the remaining bits will be
// zeros.
- uint32_t lsb = mleft.right().Value();
if (lsb + mask_width > 32) mask_width = 32 - lsb;
Emit(kArm64Ubfx32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
- g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
+ g.UseImmediateOrTemp(mleft.right().node(), lsb),
+ g.TempImmediate(mask_width));
return;
}
// Other cases fall through to the normal And operation.
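
The Ubfx selection above can be stated independently: And(Shr(x, shift), mask) is a single unsigned bitfield extract whenever the mask is a contiguous run of ones anchored at bit 0. A sketch of the check with an illustrative helper (not V8 code):

#include <cassert>
#include <cstdint>

// Returns true when And(Shr(x, shift), mask) can be a single 32-bit ubfx,
// i.e. when mask is a contiguous run of ones starting at bit 0.
bool MatchUbfx32(uint32_t mask, uint32_t shift, unsigned* lsb, unsigned* width) {
  if (mask == 0 || (mask & (mask + 1)) != 0) return false;  // not 0...011...1
  *lsb = shift & 0x1f;  // shifts only observe the low five bits
  unsigned w = 0;
  for (uint32_t m = mask; m != 0; m >>= 1) w++;  // bit length of a low run
  if (*lsb + w > 32) w = 32 - *lsb;  // the shift already zeroed the top bits
  *width = w;
  return true;
}

int main() {
  unsigned lsb, width;
  // (x >> 8) & 0xff  ==>  ubfx w0, w_x, #8, #8
  assert(MatchUbfx32(0xff, 8, &lsb, &width) && lsb == 8 && width == 8);
  assert(!MatchUbfx32(0xf0, 8, &lsb, &width));  // mask not anchored at bit 0
  return 0;
}
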
@@ -543,22 +663,25 @@
uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
// The mask must be contiguous, and occupy the least-significant bits.
- DCHECK_EQ(0, base::bits::CountTrailingZeros64(mask));
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
// Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
// significant bits.
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().IsInRange(0, 63)) {
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int64 shifts use `value % 64`.
+ uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
+
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
// still use ubfx with a smaller mask and the remaining bits will be
// zeros.
- uint64_t lsb = mleft.right().Value();
if (lsb + mask_width > 64) mask_width = 64 - lsb;
Emit(kArm64Ubfx, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
- g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
+ g.UseImmediateOrTemp(mleft.right().node(), lsb),
+ g.TempImmediate(static_cast<int32_t>(mask_width)));
return;
}
// Other cases fall through to the normal And operation.
@@ -603,6 +726,38 @@
void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ Arm64OperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ uint32_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kArm64Lsl32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ } else {
+ // Select Ubfiz for Shl(And(x, mask), imm) where the mask is
+ // contiguous, and the shift immediate non-zero.
+ Emit(kArm64Ubfiz32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()), g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
}
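
A standalone illustration of the Ubfiz/Lsl choice above: Shl(And(x, mask), imm) inserts the masked low bits at the shift position, and once shift + mask_width reaches the register width the And contributes nothing. The helper below is hypothetical:

#include <cassert>
#include <cstdint>

// ubfiz dst, src, #shift, #width: take the low `width` bits of src and
// place them starting at bit `shift`.
uint32_t Ubfiz32(uint32_t x, unsigned shift, unsigned width) {
  uint32_t field = (width >= 32) ? x : (x & ((1u << width) - 1));
  return field << shift;
}

int main() {
  // (x & 0xff) << 8  ==>  ubfiz w0, w_x, #8, #8
  assert(Ubfiz32(0x12345678u, 8, 8) == ((0x12345678u & 0xffu) << 8));
  // Once shift + width >= 32 the And is redundant and a plain Lsl suffices:
  assert(((0x12345678u & 0xffffffu) << 8) == (0x12345678u << 8));
  return 0;
}
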
@@ -623,47 +778,96 @@
}
-void InstructionSelector::VisitWord32Shr(Node* node) {
- Arm64OperandGenerator g(this);
+namespace {
+
+bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
+ Arm64OperandGenerator g(selector);
Int32BinopMatcher m(node);
- if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
- int32_t lsb = m.right().Value();
+ if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+ // Select Ubfx or Sbfx for (x << (K & 0x1f)) OP (K & 0x1f), where
+ // OP is >>> or >> and (K & 0x1f) != 0.
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue() && m.right().HasValue() &&
+ (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
+ DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
+ ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
+
+ int right_val = m.right().Value() & 0x1f;
+ DCHECK_NE(right_val, 0);
+
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0),
+ g.TempImmediate(32 - right_val));
+ return true;
+ }
+ }
+ return false;
+}
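
The Sbfx/Ubfx selection above rests on the identity that (x << K) >> K isolates the low 32 - K bits of x, sign- or zero-extended depending on the shift kind. A standalone check (illustrative):

#include <cassert>
#include <cstdint>

// (x << k) >> k with an arithmetic shift extracts the low 32 - k bits,
// sign-extended: exactly sbfx dst, src, #0, #(32 - k).
int32_t SignedField(int32_t x, int k) {
  return static_cast<int32_t>(static_cast<uint32_t>(x) << k) >> k;
}

int main() {
  // (x << 24) >> 24  ==>  sbfx w0, w_x, #0, #8 (sign-extend the low byte)
  assert(SignedField(0x1234ff80, 24) == -128);
  // (x << 16) >>> 16 ==>  ubfx w0, w_x, #0, #16 (zero-extend the low half)
  assert(((0x1234ffffu << 16) >> 16) == 0xffffu);
  return 0;
}
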
+
+} // namespace
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x1f;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
- uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
- uint32_t mask_width = base::bits::CountPopulation32(mask);
- uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
+ uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
if ((mask_msb + mask_width + lsb) == 32) {
+ Arm64OperandGenerator g(this);
DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
Emit(kArm64Ubfx32, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediateOrTemp(m.right().node(), lsb),
g.TempImmediate(mask_width));
return;
}
}
+ } else if (TryEmitBitfieldExtract32(this, node)) {
+ return;
}
+
+ if (m.left().IsUint32MulHigh() && m.right().HasValue() &&
+ CanCover(node, node->InputAt(0))) {
+ // Combine this shift with the multiply and shift that would be generated
+ // by Uint32MulHigh.
+ Arm64OperandGenerator g(this);
+ Node* left = m.left().node();
+ int shift = m.right().Value() & 0x1f;
+ InstructionOperand const smull_operand = g.TempRegister();
+ Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
+ g.UseRegister(left->InputAt(1)));
+ Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand,
+ g.TempImmediate(32 + shift));
+ return;
+ }
+
VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
}
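
The Uint32MulHigh combine above works because taking the high 32 bits of a product and then shifting right by s more is one 64-bit multiply followed by a single shift of 32 + s. An illustrative scalar model (not V8 code):

#include <cassert>
#include <cstdint>

uint32_t MulHighShr(uint32_t a, uint32_t b, unsigned s) {
  uint64_t product = static_cast<uint64_t>(a) * b;      // umull
  return static_cast<uint32_t>(product >> (32 + s));    // lsr #(32 + s)
}

int main() {
  assert(MulHighShr(0x80000000u, 0x80000000u, 0) == 0x40000000u);
  assert(MulHighShr(0xffffffffu, 0xffffffffu, 1) == 0x7fffffffu);
  return 0;
}
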
void InstructionSelector::VisitWord64Shr(Node* node) {
- Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
- if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
- int64_t lsb = m.right().Value();
+ if (m.left().IsWord64And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x3f;
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
// shifted into the least-significant bits.
uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
- uint64_t mask_width = base::bits::CountPopulation64(mask);
- uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ unsigned mask_width = base::bits::CountPopulation64(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
if ((mask_msb + mask_width + lsb) == 64) {
+ Arm64OperandGenerator g(this);
DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
Emit(kArm64Ubfx, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediateOrTemp(m.right().node(), lsb),
g.TempImmediate(mask_width));
return;
}
@@ -674,21 +878,54 @@
void InstructionSelector::VisitWord32Sar(Node* node) {
- Arm64OperandGenerator g(this);
+ if (TryEmitBitfieldExtract32(this, node)) {
+ return;
+ }
+
Int32BinopMatcher m(node);
- // Select Sxth/Sxtb for (x << K) >> K where K is 16 or 24.
- if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().Is(16) && m.right().Is(16)) {
- Emit(kArm64Sxth32, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()));
- return;
- } else if (mleft.right().Is(24) && m.right().Is(24)) {
- Emit(kArm64Sxtb32, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()));
+ if (m.left().IsInt32MulHigh() && m.right().HasValue() &&
+ CanCover(node, node->InputAt(0))) {
+ // Combine this shift with the multiply and shift that would be generated
+ // by Int32MulHigh.
+ Arm64OperandGenerator g(this);
+ Node* left = m.left().node();
+ int shift = m.right().Value() & 0x1f;
+ InstructionOperand const smull_operand = g.TempRegister();
+ Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
+ g.UseRegister(left->InputAt(1)));
+ Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand,
+ g.TempImmediate(32 + shift));
+ return;
+ }
+
+ if (m.left().IsInt32Add() && m.right().HasValue() &&
+ CanCover(node, node->InputAt(0))) {
+ Node* add_node = m.left().node();
+ Int32BinopMatcher madd_node(add_node);
+ if (madd_node.left().IsInt32MulHigh() &&
+ CanCover(add_node, madd_node.left().node())) {
+ // Combine the shift that would be generated by Int32MulHigh with the add
+    // on the left of this Sar operation. We do it here because the result of
+    // the add can require 33 bits, so routing it through this 32-bit Sar
+    // operation guarantees the required truncation.
+ Arm64OperandGenerator g(this);
+ Node* mul_node = madd_node.left().node();
+
+ InstructionOperand const smull_operand = g.TempRegister();
+ Emit(kArm64Smull, smull_operand, g.UseRegister(mul_node->InputAt(0)),
+ g.UseRegister(mul_node->InputAt(1)));
+
+ InstructionOperand const add_operand = g.TempRegister();
+ Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
+ add_operand, g.UseRegister(add_node->InputAt(1)), smull_operand,
+ g.TempImmediate(32));
+
+ Emit(kArm64Asr32, g.DefineAsRegister(node), add_operand,
+ g.UseImmediate(node->InputAt(1)));
return;
}
}
+
VisitRRO(this, kArm64Asr32, node, kShift32Imm);
}
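
The 33-bit remark in the comment above can be modelled directly: the sum is formed with 64-bit arithmetic, and the final 32-bit arithmetic shift consumes only the truncated low word. A standalone sketch (illustrative):

#include <cassert>
#include <cstdint>

int32_t MulHighAddSar(int32_t a, int32_t b, int32_t addend, int shift) {
  int64_t high = (static_cast<int64_t>(a) * b) >> 32;  // smull + asr #32
  int64_t sum = high + addend;                         // may need 33 bits
  // The 32-bit asr reads only the low word, performing the truncation.
  return static_cast<int32_t>(sum) >> (shift & 0x1f);
}

int main() {
  assert(MulHighAddSar(1 << 30, 1 << 30, 5, 2) == ((0x10000000 + 5) >> 2));
  return 0;
}
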
@@ -708,24 +945,56 @@
}
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
// Select Madd(x, y, z) for Add(Mul(x, y), z).
if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- Emit(kArm64Madd32, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
- return;
+    // Check that the multiply can't later be reduced to an addition with shift.
+ if (LeftShiftForReducedMultiply(&mleft) == 0) {
+ Emit(kArm64Madd32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()),
+ g.UseRegister(m.right().node()));
+ return;
+ }
}
- // Select Madd(x, y, z) for Add(x, Mul(x, y)).
+ // Select Madd(x, y, z) for Add(z, Mul(x, y)).
if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
- Emit(kArm64Madd32, g.DefineAsRegister(node),
- g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
- return;
+    // Check that the multiply can't later be reduced to an addition with shift.
+ if (LeftShiftForReducedMultiply(&mright) == 0) {
+ Emit(kArm64Madd32, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()),
+ g.UseRegister(m.left().node()));
+ return;
+ }
}
VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
}
@@ -737,18 +1006,26 @@
// Select Madd(x, y, z) for Add(Mul(x, y), z).
if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
Int64BinopMatcher mleft(m.left().node());
- Emit(kArm64Madd, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
- return;
+    // Check that the multiply can't later be reduced to an addition with shift.
+ if (LeftShiftForReducedMultiply(&mleft) == 0) {
+ Emit(kArm64Madd, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()),
+ g.UseRegister(m.right().node()));
+ return;
+ }
}
- // Select Madd(x, y, z) for Add(x, Mul(x, y)).
+ // Select Madd(x, y, z) for Add(z, Mul(x, y)).
if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
Int64BinopMatcher mright(m.right().node());
- Emit(kArm64Madd, g.DefineAsRegister(node),
- g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
- return;
+    // Check that the multiply can't later be reduced to an addition with shift.
+ if (LeftShiftForReducedMultiply(&mright) == 0) {
+ Emit(kArm64Madd, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()),
+ g.UseRegister(m.left().node()));
+ return;
+ }
}
VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
}
@@ -758,21 +1035,20 @@
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
- // Select Msub(a, x, y) for Sub(a, Mul(x, y)).
+ // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
- Emit(kArm64Msub32, g.DefineAsRegister(node),
- g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
- return;
+    // Check that the multiply can't later be reduced to an addition with shift.
+ if (LeftShiftForReducedMultiply(&mright) == 0) {
+ Emit(kArm64Msub32, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()),
+ g.UseRegister(m.left().node()));
+ return;
+ }
}
- if (m.left().Is(0)) {
- Emit(kArm64Neg32, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- } else {
- VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
- }
+ VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
}
@@ -780,20 +1056,20 @@
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
- // Select Msub(a, x, y) for Sub(a, Mul(x, y)).
+ // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
Int64BinopMatcher mright(m.right().node());
- Emit(kArm64Msub, g.DefineAsRegister(node),
- g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
- return;
+    // Check that the multiply can't later be reduced to an addition with shift.
+ if (LeftShiftForReducedMultiply(&mright) == 0) {
+ Emit(kArm64Msub, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()),
+ g.UseRegister(m.left().node()));
+ return;
+ }
}
- if (m.left().Is(0)) {
- Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
- } else {
- VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
- }
+ VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
}
@@ -801,6 +1077,16 @@
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
+ // First, try to reduce the multiplication to addition with left shift.
+ // x * (2^k + 1) -> x + (x << k)
+ int32_t shift = LeftShiftForReducedMultiply(&m);
+ if (shift > 0) {
+ Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(shift));
+ return;
+ }
+
if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
@@ -833,6 +1119,16 @@
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
+ // First, try to reduce the multiplication to addition with left shift.
+ // x * (2^k + 1) -> x + (x << k)
+ int32_t shift = LeftShiftForReducedMultiply(&m);
+ if (shift > 0) {
+ Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(shift));
+ return;
+ }
+
if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
Int64BinopMatcher mleft(m.left().node());
@@ -861,9 +1157,8 @@
void InstructionSelector::VisitInt32MulHigh(Node* node) {
- // TODO(arm64): Can we do better here?
Arm64OperandGenerator g(this);
- InstructionOperand* const smull_operand = g.TempRegister();
+ InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)));
Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
@@ -871,9 +1166,8 @@
void InstructionSelector::VisitUint32MulHigh(Node* node) {
- // TODO(arm64): Can we do better here?
Arm64OperandGenerator g(this);
- InstructionOperand* const smull_operand = g.TempRegister();
+ InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
g.UseRegister(node->InputAt(1)));
Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
@@ -921,43 +1215,100 @@
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- Arm64OperandGenerator g(this);
- Emit(kArm64Float32ToFloat64, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArm64Float32ToFloat64, node);
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- Arm64OperandGenerator g(this);
- Emit(kArm64Int32ToFloat64, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArm64Int32ToFloat64, node);
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- Arm64OperandGenerator g(this);
- Emit(kArm64Uint32ToFloat64, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArm64Uint32ToFloat64, node);
}
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- Arm64OperandGenerator g(this);
- Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArm64Float64ToInt32, node);
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ VisitRR(this, kArm64Float64ToUint32, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Arm64OperandGenerator g(this);
- Emit(kArm64Float64ToUint32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ Arm64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kArm64Float64ToInt64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ Arm64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kArm64Float32ToUint64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ Arm64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
- Arm64OperandGenerator g(this);
- Emit(kArm64Sxtw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArm64Sxtw, node);
}
@@ -1002,16 +1353,25 @@
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- Arm64OperandGenerator g(this);
- Emit(kArm64Float64ToFloat32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArm64Float64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
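+ // kJavaScript truncation follows ECMA-262 ToInt32 (NaN and out-of-range
+ // values wrap modulo 2^32), while kRoundToZero maps to a plain
+ // float-to-integer conversion instruction.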
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kArm64Float64ToInt32, node);
+ }
+ UNREACHABLE();
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
Node* value = node->InputAt(0);
- if (CanCover(node, value)) {
+ if (CanCover(node, value) && value->InputCount() >= 2) {
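+ // The InputCount() check guards the binop matcher below: the covered
+ // value need not be a binary operation at all.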
Int64BinopMatcher m(value);
if ((m.IsWord64Sar() && m.right().HasValue() &&
(m.right().Value() == 32)) ||
@@ -1026,23 +1386,102 @@
}
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kArm64Int64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kArm64Int64ToFloat64, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kArm64Uint64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kArm64Uint64ToFloat64, node);
+}
+
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kArm64Float64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kArm64U64MoveFloat64, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ VisitRR(this, kArm64Float64MoveU64, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kArm64Float64MoveU64, node);
+}
+
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kArm64Float32Add, node);
+}
+
+
void InstructionSelector::VisitFloat64Add(Node* node) {
- VisitRRRFloat64(this, kArm64Float64Add, node);
+ VisitRRR(this, kArm64Float64Add, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ VisitRRR(this, kArm64Float32Sub, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
- VisitRRRFloat64(this, kArm64Float64Sub, node);
+ Arm64OperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsMinusZero()) {
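+ // -0.0 - RoundDown(-0.0 - x) computes Ceil(x), so that whole pattern can
+ // be selected as a single round-up instruction.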
+ if (m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
+ Emit(kArm64Float64Neg, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()));
+ return;
+ }
+ VisitRRR(this, kArm64Float64Sub, node);
+}
+
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kArm64Float32Mul, node);
}
void InstructionSelector::VisitFloat64Mul(Node* node) {
- VisitRRRFloat64(this, kArm64Float64Mul, node);
+ VisitRRR(this, kArm64Float64Mul, node);
+}
+
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kArm64Float32Div, node);
}
void InstructionSelector::VisitFloat64Div(Node* node) {
- VisitRRRFloat64(this, kArm64Float64Div, node);
+ VisitRRR(this, kArm64Float64Div, node);
}
@@ -1054,112 +1493,145 @@
}
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ VisitRRR(this, kArm64Float32Max, node);
+}
+
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ VisitRRR(this, kArm64Float64Max, node);
+}
+
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ VisitRRR(this, kArm64Float32Min, node);
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ VisitRRR(this, kArm64Float64Min, node);
+}
+
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kArm64Float32Abs, node);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kArm64Float64Abs, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kArm64Float32Sqrt, node);
+}
+
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- VisitRRFloat64(this, kArm64Float64Sqrt, node);
+ VisitRR(this, kArm64Float64Sqrt, node);
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- VisitRRFloat64(this, kArm64Float64Floor, node);
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kArm64Float32RoundDown, node);
}
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- VisitRRFloat64(this, kArm64Float64Ceil, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kArm64Float64RoundDown, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kArm64Float32RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kArm64Float64RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kArm64Float32RoundTruncate, node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- VisitRRFloat64(this, kArm64Float64RoundTruncate, node);
+ VisitRR(this, kArm64Float64RoundTruncate, node);
}
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
- VisitRRFloat64(this, kArm64Float64RoundTiesAway, node);
+ VisitRR(this, kArm64Float64RoundTiesAway, node);
}
-void InstructionSelector::VisitCall(Node* node) {
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kArm64Float32RoundTiesEven, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kArm64Float64RoundTiesEven, node);
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
Arm64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = NULL;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM64 it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
// Push the arguments to the stack.
- bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
- int aligned_push_count = buffer.pushed_nodes.size();
+ int aligned_push_count = static_cast<int>(arguments->size());
+
+ bool pushed_count_uneven = aligned_push_count & 1;
+ int claim_count = aligned_push_count;
+ if (pushed_count_uneven && descriptor->UseNativeStack()) {
+ // We can only claim for an even number of call arguments when we use the
+ // native stack.
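+ // Claim slots are eight bytes each, so an even count keeps csp 16-byte
+ // aligned as AArch64 requires.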
+ claim_count++;
+ }
 // TODO(dcarney): claim and poke probably take small immediates,
 // so loop here if the counts exceed them.
// Bump the stack pointer(s).
if (aligned_push_count > 0) {
// TODO(dcarney): it would be better to bump the csp here only
// and emit paired stores with increment for non c frames.
- Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
+ Emit(kArm64ClaimForCallArguments, g.NoOutput(),
+ g.TempImmediate(claim_count));
}
+
// Move arguments to the stack.
- {
- int slot = buffer.pushed_nodes.size() - 1;
- // Emit the uneven pushes.
- if (pushed_count_uneven) {
- Node* input = buffer.pushed_nodes[slot];
- Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
- slot--;
- }
- // Now all pushes can be done in pairs.
- for (; slot >= 0; slot -= 2) {
- Emit(kArm64PokePair | MiscField::encode(slot), NULL,
- g.UseRegister(buffer.pushed_nodes[slot]),
- g.UseRegister(buffer.pushed_nodes[slot - 1]));
- }
+ int slot = aligned_push_count - 1;
+ while (slot >= 0) {
+ Emit(kArm64Poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
+ g.TempImmediate(slot));
+ slot--;
+ // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
+ // same type.
+ // Emit(kArm64PokePair, g.NoOutput(), g.UseRegister((*arguments)[slot]),
+ // g.UseRegister((*arguments)[slot - 1]), g.TempImmediate(slot));
+ // slot -= 2;
}
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- InstructionOperand** first_output =
- buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
- Instruction* call_instr =
- Emit(opcode, buffer.outputs.size(), first_output,
- buffer.instruction_args.size(), &buffer.instruction_args.front());
- call_instr->MarkAsCall();
}
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+
+namespace {
+
// Shared routine for multiple compare operations.
-static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand* left, InstructionOperand* right,
- FlagsContinuation* cont) {
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
opcode = cont->Encode(opcode);
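+ // Encode() folds the continuation's flags mode and condition into the
+ // instruction code.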
if (cont->IsBranch()) {
- selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1168,9 +1640,9 @@
// Shared routine for multiple word compare operations.
-static void VisitWordCompare(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont,
- bool commutative, ImmediateMode immediate_mode) {
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative, ImmediateMode immediate_mode) {
Arm64OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
@@ -1190,43 +1662,94 @@
}
-static void VisitWord32Compare(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- VisitWordCompare(selector, node, kArm64Cmp32, cont, false, kArithmeticImm);
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Int32BinopMatcher m(node);
+ ArchOpcode opcode = kArm64Cmp32;
+
+ // Select negated compare for comparisons with negated right input.
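+ // Cmp(a, 0 - b) sets the same flags as Cmn(a, b).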
+ if (m.right().IsInt32Sub()) {
+ Node* sub = m.right().node();
+ Int32BinopMatcher msub(sub);
+ if (msub.left().Is(0)) {
+ bool can_cover = selector->CanCover(node, sub);
+ node->ReplaceInput(1, msub.right().node());
+ // Even if the comparison node covers the subtraction, after the input
+ // replacement above, the node still won't cover the input to the
+ // subtraction; the subtraction still uses it.
+ // In order to get shifted operations to work, we must remove the rhs
+ // input to the subtraction, as TryMatchAnyShift requires this node to
+ // cover the input shift. We do this by setting it to the lhs input,
+ // as we know it's zero, and the result of the subtraction isn't used by
+ // any other node.
+ if (can_cover) sub->ReplaceInput(1, msub.left().node());
+ opcode = kArm64Cmn32;
+ }
+ }
+ VisitBinop<Int32BinopMatcher>(selector, node, opcode, kArithmeticImm, cont);
}
-static void VisitWordTest(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
+void VisitWordTest(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
cont);
}
-static void VisitWord32Test(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
+void VisitWord32Test(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
VisitWordTest(selector, node, kArm64Tst32, cont);
}
-static void VisitWord64Test(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
+void VisitWord64Test(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
VisitWordTest(selector, node, kArm64Tst, cont);
}
-// Shared routine for multiple float compare operations.
-static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(left),
- g.UseRegister(right), cont);
+ Float32BinopMatcher m(node);
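+ // fcmp can compare directly against the immediate +0.0, so avoid
+ // materializing the zero in a register; the float64 variant below does
+ // the same.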
+ if (m.right().Is(0.0f)) {
+ VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()), cont);
+ } else if (m.left().Is(0.0f)) {
+ cont->Commute();
+ VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.right().node()),
+ g.UseImmediate(m.left().node()), cont);
+ } else {
+ VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()), cont);
+ }
}
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(selector);
+ Float64BinopMatcher m(node);
+ if (m.right().Is(0.0)) {
+ VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()), cont);
+ } else if (m.left().Is(0.0)) {
+ cont->Commute();
+ VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.right().node()),
+ g.UseImmediate(m.left().node()), cont);
+ } else {
+ VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()), cont);
+ }
+}
+
+} // namespace
+
+
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
OperandGenerator g(this);
@@ -1236,25 +1759,12 @@
FlagsContinuation cont(kNotEqual, tbranch, fbranch);
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (CanCover(user, value)) {
- if (value->opcode() == IrOpcode::kWord32Equal) {
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
- } else if (value->opcode() == IrOpcode::kWord64Equal) {
- Int64BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
+ while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
} else {
break;
}
@@ -1294,27 +1804,40 @@
cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
kArithmeticImm);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+ kArithmeticImm);
+ case IrOpcode::kFloat32Equal:
+ cont.OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(this, value, &cont);
+ case IrOpcode::kFloat32LessThan:
+ cont.OverwriteAndNegateIfEqual(kFloatLessThan);
+ return VisitFloat32Compare(this, value, &cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
+ return VisitFloat32Compare(this, value, &cont);
case IrOpcode::kFloat64Equal:
- cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+ cont.OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kFloat64LessThan:
- cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont.OverwriteAndNegateIfEqual(kFloatLessThan);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation>) is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
- Node* node = value->InputAt(0);
- Node* result = node->FindProjection(0);
- if (result == NULL || IsDefined(result)) {
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
@@ -1324,6 +1847,14 @@
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
kArithmeticImm, &cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
+ kArithmeticImm, &cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
+ kArithmeticImm, &cont);
default:
break;
}
@@ -1334,8 +1865,7 @@
return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
kArithmeticImm);
case IrOpcode::kInt32Sub:
- return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
- kArithmeticImm);
+ return VisitWord32Compare(this, value, &cont);
case IrOpcode::kWord32And: {
Int32BinopMatcher m(value);
if (m.right().HasValue() &&
@@ -1343,12 +1873,11 @@
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont.condition() == kEqual) ||
(cont.condition() == kNotEqual));
- Emit(cont.Encode(kArm64TestAndBranch32), NULL,
+ Emit(cont.Encode(kArm64TestAndBranch32), g.NoOutput(),
g.UseRegister(m.left().node()),
g.TempImmediate(
base::bits::CountTrailingZeros32(m.right().Value())),
- g.Label(cont.true_block()),
- g.Label(cont.false_block()))->MarkAsControl();
+ g.Label(cont.true_block()), g.Label(cont.false_block()));
return;
}
return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
@@ -1361,12 +1890,11 @@
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont.condition() == kEqual) ||
(cont.condition() == kNotEqual));
- Emit(cont.Encode(kArm64TestAndBranch), NULL,
+ Emit(cont.Encode(kArm64TestAndBranch), g.NoOutput(),
g.UseRegister(m.left().node()),
g.TempImmediate(
base::bits::CountTrailingZeros64(m.right().Value())),
- g.Label(cont.true_block()),
- g.Label(cont.false_block()))->MarkAsControl();
+ g.Label(cont.true_block()), g.Label(cont.false_block()));
return;
}
return VisitWordCompare(this, value, kArm64Tst, &cont, true,
@@ -1378,9 +1906,37 @@
}
// Branch could not be combined with a compare, compare against 0 and branch.
- Emit(cont.Encode(kArm64CompareAndBranch32), NULL, g.UseRegister(value),
- g.Label(cont.true_block()),
- g.Label(cont.false_block()))->MarkAsControl();
+ Emit(cont.Encode(kArm64CompareAndBranch32), g.NoOutput(),
+ g.UseRegister(value), g.Label(cont.true_block()),
+ g.Label(cont.false_block()));
+}
+
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ Arm64OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
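+ // Weigh code size against (three times) dispatch time: dense value
+ // ranges favor the jump table, sparse ones the compare/branch chain.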
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kArm64Sub32, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
}
@@ -1401,6 +1957,14 @@
case IrOpcode::kWord32And:
return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
kLogical32Imm);
+ case IrOpcode::kWord32Equal: {
+ // Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne).
+ Int32BinopMatcher mequal(value);
+ node->ReplaceInput(0, mequal.left().node());
+ node->ReplaceInput(1, mequal.right().node());
+ cont.Negate();
+ return VisitWord32Compare(this, node, &cont);
+ }
default:
break;
}
@@ -1457,7 +2021,7 @@
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
kArithmeticImm, &cont);
@@ -1468,7 +2032,7 @@
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
kArithmeticImm, &cont);
@@ -1478,6 +2042,28 @@
}
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm, &cont);
+}
+
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm, &cont);
+}
+
+
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont(kSignedLessThan, node);
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
@@ -1496,35 +2082,118 @@
}
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
+}
+
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont(kFloatLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kFloatLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kFloatLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kFloatLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64ExtractLowWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64ExtractHighWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
+ CanCover(node, left)) {
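+ // Both word32 halves of the float64 are available here, so fuse the two
+ // inserts into a single bitfield insert (Bfi) plus one move into an FP
+ // register.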
+ Node* right_of_left = left->InputAt(1);
+ Emit(kArm64Bfi, g.DefineSameAsFirst(right), g.UseRegister(right),
+ g.UseRegister(right_of_left), g.TempImmediate(32),
+ g.TempImmediate(32));
+ Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
+ return;
+ }
+ Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
+ CanCover(node, left)) {
+ Node* right_of_left = left->InputAt(1);
+ Emit(kArm64Bfi, g.DefineSameAsFirst(left), g.UseRegister(right_of_left),
+ g.UseRegister(right), g.TempImmediate(32), g.TempImmediate(32));
+ Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
+ return;
+ }
+ Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
+ return MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe;
}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/arm64/linkage-arm64.cc b/src/compiler/arm64/linkage-arm64.cc
deleted file mode 100644
index 291b552..0000000
--- a/src/compiler/arm64/linkage-arm64.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct Arm64LinkageHelperTraits {
- static Register ReturnValueReg() { return x0; }
- static Register ReturnValue2Reg() { return x1; }
- static Register JSCallFunctionReg() { return x1; }
- static Register ContextReg() { return cp; }
- static Register RuntimeCallFunctionReg() { return x1; }
- static Register RuntimeCallArgCountReg() { return x0; }
- static RegList CCalleeSaveRegisters() {
- // TODO(dcarney): correct callee saved registers.
- return 0;
- }
- static Register CRegisterParameter(int i) {
- static Register register_parameters[] = {x0, x1, x2, x3, x4, x5, x6, x7};
- return register_parameters[i];
- }
- static int CRegisterParametersLength() { return 8; }
-};
-
-
-typedef LinkageHelper<Arm64LinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index cde5e71..c70dfbf 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -4,38 +4,481 @@
#include "src/compiler/ast-graph-builder.h"
+#include "src/ast/scopes.h"
#include "src/compiler.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/control-builders.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/compiler/node-properties.h"
-#include "src/full-codegen.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/state-values-utils.h"
+#include "src/compiler/type-hint-analyzer.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
namespace compiler {
+
+// Each expression in the AST is evaluated in a specific context. This context
+// decides how the evaluation result is passed up the visitor.
+class AstGraphBuilder::AstContext BASE_EMBEDDED {
+ public:
+ bool IsEffect() const { return kind_ == Expression::kEffect; }
+ bool IsValue() const { return kind_ == Expression::kValue; }
+ bool IsTest() const { return kind_ == Expression::kTest; }
+
+ // Determines how to combine the frame state with the value
+ // that is about to be plugged into this AstContext.
+ OutputFrameStateCombine GetStateCombine() {
+ return IsEffect() ? OutputFrameStateCombine::Ignore()
+ : OutputFrameStateCombine::Push();
+ }
+
+ // Plug a node into this expression context. Call this function in tail
+ // position in the Visit functions for expressions.
+ virtual void ProduceValue(Node* value) = 0;
+
+ // Unplugs a node from this expression context. Call this to retrieve the
+ // result of another Visit function that already plugged the context.
+ virtual Node* ConsumeValue() = 0;
+
+ // Shortcut for "context->ProduceValue(context->ConsumeValue())".
+ void ReplaceValue() { ProduceValue(ConsumeValue()); }
+
+ protected:
+ AstContext(AstGraphBuilder* owner, Expression::Context kind);
+ virtual ~AstContext();
+
+ AstGraphBuilder* owner() const { return owner_; }
+ Environment* environment() const { return owner_->environment(); }
+
+// We want to be able to assert, in a context-specific way, that the stack
+// height makes sense when the context is filled.
+#ifdef DEBUG
+ int original_height_;
+#endif
+
+ private:
+ Expression::Context kind_;
+ AstGraphBuilder* owner_;
+ AstContext* outer_;
+};
+
+
+// Context to evaluate expression for its side effects only.
+class AstGraphBuilder::AstEffectContext final : public AstContext {
+ public:
+ explicit AstEffectContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kEffect) {}
+ ~AstEffectContext() final;
+ void ProduceValue(Node* value) final;
+ Node* ConsumeValue() final;
+};
+
+
+// Context to evaluate expression for its value (and side effects).
+class AstGraphBuilder::AstValueContext final : public AstContext {
+ public:
+ explicit AstValueContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kValue) {}
+ ~AstValueContext() final;
+ void ProduceValue(Node* value) final;
+ Node* ConsumeValue() final;
+};
+
+
+// Context to evaluate expression for a condition value (and side effects).
+class AstGraphBuilder::AstTestContext final : public AstContext {
+ public:
+ AstTestContext(AstGraphBuilder* owner, TypeFeedbackId feedback_id)
+ : AstContext(owner, Expression::kTest), feedback_id_(feedback_id) {}
+ ~AstTestContext() final;
+ void ProduceValue(Node* value) final;
+ Node* ConsumeValue() final;
+
+ private:
+ TypeFeedbackId const feedback_id_;
+};
+
+
+// Scoped class tracking context objects created by the visitor. Represents
+// mutations of the context chain within the function body and allows the
+// current {scope} and {context} to be changed during visitation.
+class AstGraphBuilder::ContextScope BASE_EMBEDDED {
+ public:
+ ContextScope(AstGraphBuilder* builder, Scope* scope, Node* context)
+ : builder_(builder),
+ outer_(builder->execution_context()),
+ scope_(scope),
+ depth_(builder_->environment()->context_chain_length()) {
+ builder_->environment()->PushContext(context); // Push.
+ builder_->set_execution_context(this);
+ }
+
+ ~ContextScope() {
+ builder_->set_execution_context(outer_); // Pop.
+ builder_->environment()->PopContext();
+ CHECK_EQ(depth_, builder_->environment()->context_chain_length());
+ }
+
+ // Current scope during visitation.
+ Scope* scope() const { return scope_; }
+
+ private:
+ AstGraphBuilder* builder_;
+ ContextScope* outer_;
+ Scope* scope_;
+ int depth_;
+};
+
+
+// Scoped class tracking control statements entered by the visitor. There are
+// different types of statements participating in this stack to properly track
+// local as well as non-local control flow:
+// - IterationStatement : Allows proper 'break' and 'continue' behavior.
+// - BreakableStatement : Allows 'break' from block and switch statements.
+// - TryCatchStatement : Intercepts 'throw' and implicit exceptional edges.
+// - TryFinallyStatement: Intercepts 'break', 'continue', 'throw' and 'return'.
+class AstGraphBuilder::ControlScope BASE_EMBEDDED {
+ public:
+ explicit ControlScope(AstGraphBuilder* builder)
+ : builder_(builder),
+ outer_(builder->execution_control()),
+ context_length_(builder->environment()->context_chain_length()),
+ stack_height_(builder->environment()->stack_height()) {
+ builder_->set_execution_control(this); // Push.
+ }
+
+ virtual ~ControlScope() {
+ builder_->set_execution_control(outer_); // Pop.
+ }
+
+ // Either 'break' or 'continue' to the target statement.
+ void BreakTo(BreakableStatement* target);
+ void ContinueTo(BreakableStatement* target);
+
+ // Either 'return' or 'throw' the given value.
+ void ReturnValue(Node* return_value);
+ void ThrowValue(Node* exception_value);
+
+ class DeferredCommands;
+
+ protected:
+ enum Command { CMD_BREAK, CMD_CONTINUE, CMD_RETURN, CMD_THROW };
+
+ // Performs one of the above commands on this stack of control scopes. This
+ // walks through the stack giving each scope a chance to execute or defer the
+ // given command by overriding the {Execute} method appropriately. Note that
+ // this also drops extra operands from the environment for each skipped scope.
+ void PerformCommand(Command cmd, Statement* target, Node* value);
+
+ // Interface to execute a given command in this scope. Returning {true} here
+ // indicates successful execution whereas {false} requests to skip the scope.
+ virtual bool Execute(Command cmd, Statement* target, Node* value) {
+ // For function-level control.
+ switch (cmd) {
+ case CMD_THROW:
+ builder()->BuildThrow(value);
+ return true;
+ case CMD_RETURN:
+ builder()->BuildReturn(value);
+ return true;
+ case CMD_BREAK:
+ case CMD_CONTINUE:
+ break;
+ }
+ return false;
+ }
+
+ Environment* environment() { return builder_->environment(); }
+ AstGraphBuilder* builder() const { return builder_; }
+ int context_length() const { return context_length_; }
+ int stack_height() const { return stack_height_; }
+
+ private:
+ AstGraphBuilder* builder_;
+ ControlScope* outer_;
+ int context_length_;
+ int stack_height_;
+};
+
+
+// Helper class for a try-finally control scope. It can record intercepted
+// control-flow commands that cause entry into a finally-block, and re-apply
+// them once that block is left again. Special tokens identify the distinct
+// paths through the finally-block so control can be re-dispatched on exit.
+class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
+ public:
+ explicit DeferredCommands(AstGraphBuilder* owner)
+ : owner_(owner), deferred_(owner->local_zone()) {}
+
+ // One recorded control-flow command.
+ struct Entry {
+ Command command; // The command type being applied on this path.
+ Statement* statement; // The target statement for the command or {nullptr}.
+ Node* token; // A token identifying this particular path.
+ };
+
+ // Records a control-flow command while entering the finally-block. This also
+ // generates a new dispatch token that identifies one particular path.
+ Node* RecordCommand(Command cmd, Statement* stmt, Node* value) {
+ Node* token = NewPathTokenForDeferredCommand();
+ deferred_.push_back({cmd, stmt, token});
+ return token;
+ }
+
+ // Returns the dispatch token to be used to identify the implicit fall-through
+ // path at the end of a try-block into the corresponding finally-block.
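+ // Deferred paths are numbered 0, 1, 2, ... whereas the fall-through
+ // token is -1, so the two kinds can never collide.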
+ Node* GetFallThroughToken() { return NewPathTokenForImplicitFallThrough(); }
+
+ // Applies all recorded control-flow commands after the finally-block again.
+ // This generates a dynamic dispatch on the token from the entry point.
+ void ApplyDeferredCommands(Node* token, Node* value) {
+ SwitchBuilder dispatch(owner_, static_cast<int>(deferred_.size()));
+ dispatch.BeginSwitch();
+ for (size_t i = 0; i < deferred_.size(); ++i) {
+ Node* condition = NewPathDispatchCondition(token, deferred_[i].token);
+ dispatch.BeginLabel(static_cast<int>(i), condition);
+ dispatch.EndLabel();
+ }
+ for (size_t i = 0; i < deferred_.size(); ++i) {
+ dispatch.BeginCase(static_cast<int>(i));
+ owner_->execution_control()->PerformCommand(
+ deferred_[i].command, deferred_[i].statement, value);
+ dispatch.EndCase();
+ }
+ dispatch.EndSwitch();
+ }
+
+ protected:
+ Node* NewPathTokenForDeferredCommand() {
+ return owner_->jsgraph()->Constant(static_cast<int>(deferred_.size()));
+ }
+ Node* NewPathTokenForImplicitFallThrough() {
+ return owner_->jsgraph()->Constant(-1);
+ }
+ Node* NewPathDispatchCondition(Node* t1, Node* t2) {
+ // TODO(mstarzinger): This should be machine()->WordEqual(), but our Phi
+ // nodes all have kRepTagged|kTypeAny, which causes representation mismatch.
+ return owner_->NewNode(owner_->javascript()->StrictEqual(), t1, t2);
+ }
+
+ private:
+ AstGraphBuilder* owner_;
+ ZoneVector<Entry> deferred_;
+};
+
+
+// Control scope implementation for a BreakableStatement.
+class AstGraphBuilder::ControlScopeForBreakable : public ControlScope {
+ public:
+ ControlScopeForBreakable(AstGraphBuilder* owner, BreakableStatement* target,
+ ControlBuilder* control)
+ : ControlScope(owner), target_(target), control_(control) {}
+
+ protected:
+ bool Execute(Command cmd, Statement* target, Node* value) override {
+ if (target != target_) return false; // We are not the command target.
+ switch (cmd) {
+ case CMD_BREAK:
+ control_->Break();
+ return true;
+ case CMD_CONTINUE:
+ case CMD_THROW:
+ case CMD_RETURN:
+ break;
+ }
+ return false;
+ }
+
+ private:
+ BreakableStatement* target_;
+ ControlBuilder* control_;
+};
+
+
+// Control scope implementation for an IterationStatement.
+class AstGraphBuilder::ControlScopeForIteration : public ControlScope {
+ public:
+ ControlScopeForIteration(AstGraphBuilder* owner, IterationStatement* target,
+ LoopBuilder* control)
+ : ControlScope(owner), target_(target), control_(control) {}
+
+ protected:
+ bool Execute(Command cmd, Statement* target, Node* value) override {
+ if (target != target_) return false; // We are not the command target.
+ switch (cmd) {
+ case CMD_BREAK:
+ control_->Break();
+ return true;
+ case CMD_CONTINUE:
+ control_->Continue();
+ return true;
+ case CMD_THROW:
+ case CMD_RETURN:
+ break;
+ }
+ return false;
+ }
+
+ private:
+ BreakableStatement* target_;
+ LoopBuilder* control_;
+};
+
+
+// Control scope implementation for a TryCatchStatement.
+class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
+ public:
+ ControlScopeForCatch(AstGraphBuilder* owner, TryCatchBuilder* control)
+ : ControlScope(owner), control_(control) {
+ builder()->try_nesting_level_++; // Increment nesting.
+ builder()->try_catch_nesting_level_++;
+ }
+ ~ControlScopeForCatch() {
+ builder()->try_nesting_level_--; // Decrement nesting.
+ builder()->try_catch_nesting_level_--;
+ }
+
+ protected:
+ bool Execute(Command cmd, Statement* target, Node* value) override {
+ switch (cmd) {
+ case CMD_THROW:
+ control_->Throw(value);
+ return true;
+ case CMD_BREAK:
+ case CMD_CONTINUE:
+ case CMD_RETURN:
+ break;
+ }
+ return false;
+ }
+
+ private:
+ TryCatchBuilder* control_;
+};
+
+
+// Control scope implementation for a TryFinallyStatement.
+class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
+ public:
+ ControlScopeForFinally(AstGraphBuilder* owner, DeferredCommands* commands,
+ TryFinallyBuilder* control)
+ : ControlScope(owner), commands_(commands), control_(control) {
+ builder()->try_nesting_level_++; // Increment nesting.
+ }
+ ~ControlScopeForFinally() {
+ builder()->try_nesting_level_--; // Decrement nesting.
+ }
+
+ protected:
+ bool Execute(Command cmd, Statement* target, Node* value) override {
+ Node* token = commands_->RecordCommand(cmd, target, value);
+ control_->LeaveTry(token, value);
+ return true;
+ }
+
+ private:
+ DeferredCommands* commands_;
+ TryFinallyBuilder* control_;
+};
+
+
+// Helper for generating before and after frame states.
+class AstGraphBuilder::FrameStateBeforeAndAfter {
+ public:
+ FrameStateBeforeAndAfter(AstGraphBuilder* builder, BailoutId id_before)
+ : builder_(builder), frame_state_before_(nullptr) {
+ frame_state_before_ = id_before == BailoutId::None()
+ ? builder_->jsgraph()->EmptyFrameState()
+ : builder_->environment()->Checkpoint(id_before);
+ }
+
+ void AddToNode(
+ Node* node, BailoutId id_after,
+ OutputFrameStateCombine combine = OutputFrameStateCombine::Ignore()) {
+ int count = OperatorProperties::GetFrameStateInputCount(node->op());
+ DCHECK_LE(count, 2);
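+ // Frame state input 0 describes the state after the operation (used for
+ // lazy deoptimization); input 1, if present, the state before it (eager).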
+
+ if (count >= 1) {
+ // Add the frame state for after the operation.
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
+
+ Node* frame_state_after =
+ id_after == BailoutId::None()
+ ? builder_->jsgraph()->EmptyFrameState()
+ : builder_->environment()->Checkpoint(id_after, combine);
+
+ NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_after);
+ }
+
+ if (count >= 2) {
+ // Add the frame state for before the operation.
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 1)->opcode());
+ NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
+ }
+ }
+
+ private:
+ AstGraphBuilder* builder_;
+ Node* frame_state_before_;
+};
+
+
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph, LoopAssignmentAnalysis* loop)
- : StructuredGraphBuilder(local_zone, jsgraph->graph(), jsgraph->common()),
+ JSGraph* jsgraph, LoopAssignmentAnalysis* loop,
+ TypeHintAnalysis* type_hint_analysis)
+ : isolate_(info->isolate()),
+ local_zone_(local_zone),
info_(info),
jsgraph_(jsgraph),
+ environment_(nullptr),
+ ast_context_(nullptr),
globals_(0, local_zone),
- breakable_(NULL),
- execution_context_(NULL),
- loop_assignment_analysis_(loop) {
- InitializeAstVisitor(local_zone);
+ execution_control_(nullptr),
+ execution_context_(nullptr),
+ try_catch_nesting_level_(0),
+ try_nesting_level_(0),
+ input_buffer_size_(0),
+ input_buffer_(nullptr),
+ exit_controls_(local_zone),
+ loop_assignment_analysis_(loop),
+ type_hint_analysis_(type_hint_analysis),
+ state_values_cache_(jsgraph),
+ liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
+ local_zone),
+ frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kJavaScriptFunction, info->num_parameters() + 1,
+ info->scope()->num_stack_slots(), info->shared_info(),
+ CALL_MAINTAINS_NATIVE_CONTEXT)) {
+ InitializeAstVisitor(info->isolate());
+}
+
+
+Node* AstGraphBuilder::GetFunctionClosureForContext() {
+ Scope* closure_scope = current_scope()->ClosureScope();
+ if (closure_scope->is_script_scope() ||
+ closure_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function as
+ // their closure, not the anonymous closure containing the global code.
+ return BuildLoadNativeContextField(Context::CLOSURE_INDEX);
+ } else {
+ DCHECK(closure_scope->is_function_scope());
+ return GetFunctionClosure();
+ }
}
Node* AstGraphBuilder::GetFunctionClosure() {
if (!function_closure_.is_set()) {
- // Parameter -1 is special for the function closure
- const Operator* op = common()->Parameter(-1);
+ int index = Linkage::kJSCallClosureParamIndex;
+ const Operator* op = common()->Parameter(index, "%closure");
Node* node = NewNode(op, graph()->start());
function_closure_.set(node);
}
@@ -45,8 +488,9 @@
Node* AstGraphBuilder::GetFunctionContext() {
if (!function_context_.is_set()) {
- // Parameter (arity + 1) is special for the outer context of the function
- const Operator* op = common()->Parameter(info()->num_parameters() + 1);
+ int params = info()->num_parameters_including_this();
+ int index = Linkage::GetJSCallContextParamIndex(params);
+ const Operator* op = common()->Parameter(index, "%context");
Node* node = NewNode(op, graph()->start());
function_context_.set(node);
}
@@ -54,58 +498,117 @@
}
-bool AstGraphBuilder::CreateGraph() {
- Scope* scope = info()->scope();
- DCHECK(graph() != NULL);
+Node* AstGraphBuilder::GetNewTarget() {
+ if (!new_target_.is_set()) {
+ int params = info()->num_parameters_including_this();
+ int index = Linkage::GetJSCallNewTargetParamIndex(params);
+ const Operator* op = common()->Parameter(index, "%new.target");
+ Node* node = NewNode(op, graph()->start());
+ new_target_.set(node);
+ }
+ return new_target_.get();
+}
- // Set up the basic structure of the graph.
- int parameter_count = info()->num_parameters();
- graph()->SetStart(graph()->NewNode(common()->Start(parameter_count)));
+
+bool AstGraphBuilder::CreateGraph(bool stack_check) {
+ Scope* scope = info()->scope();
+ DCHECK_NOT_NULL(graph());
+
+ // Set up the basic structure of the graph. Outputs for {Start} are the formal
+ // parameters (including the receiver) plus new target, number of arguments,
+ // context and closure.
+ int actual_parameter_count = info()->num_parameters_including_this() + 4;
+ graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
// Initialize the top-level environment.
Environment env(this, scope, graph()->start());
set_environment(&env);
+ if (info()->is_osr()) {
+ // Use OSR normal entry as the start of the top-level environment.
+ // It will be replaced with {Dead} after typing and optimizations.
+ NewNode(common()->OsrNormalEntry());
+ }
+
// Initialize the incoming context.
- Node* outer_context = GetFunctionContext();
- set_current_context(outer_context);
+ ContextScope incoming(this, scope, GetFunctionContext());
- // Build receiver check for sloppy mode if necessary.
- // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
- Node* original_receiver = env.Lookup(scope->receiver());
- Node* patched_receiver = BuildPatchReceiverToGlobalProxy(original_receiver);
- env.Bind(scope->receiver(), patched_receiver);
+ // Initialize control scope.
+ ControlScope control(this);
- // Build node to initialize local function context.
- Node* closure = GetFunctionClosure();
- Node* inner_context = BuildLocalFunctionContext(outer_context, closure);
+ // TODO(mstarzinger): For now we cannot assume that the {this} parameter is
+ // not {the_hole}, because for derived classes {this} has a TDZ and the
+ // JSConstructStubForDerived magically passes {the_hole} as a receiver.
+ if (scope->has_this_declaration() && scope->receiver()->is_const_mode()) {
+ env.RawParameterBind(0, jsgraph()->TheHoleConstant());
+ }
- // Push top-level function scope for the function body.
- ContextScope top_context(this, scope, inner_context);
+ // Build local context only if there are context allocated variables.
+ if (info()->num_heap_slots() > 0) {
+ // Push a new inner context scope for the current activation.
+ Node* inner_context = BuildLocalActivationContext(GetFunctionContext());
+ ContextScope top_context(this, scope, inner_context);
+ CreateGraphBody(stack_check);
+ } else {
+ // Simply use the outer function context in building the graph.
+ CreateGraphBody(stack_check);
+ }
+
+ // Finish the basic structure of the graph.
+ DCHECK_NE(0u, exit_controls_.size());
+ int const input_count = static_cast<int>(exit_controls_.size());
+ Node** const inputs = &exit_controls_.front();
+ Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
+ graph()->SetEnd(end);
+
+ // Compute local variable liveness information and use it to relax
+ // frame states.
+ ClearNonLiveSlotsInFrameStates();
+
+ // Failures indicated by stack overflow.
+ return !HasStackOverflow();
+}
+
+
+void AstGraphBuilder::CreateGraphBody(bool stack_check) {
+ Scope* scope = info()->scope();
// Build the arguments object if it is used.
BuildArgumentsObject(scope->arguments());
+ // Build rest arguments array if it is used.
+ int rest_index;
+ Variable* rest_parameter = scope->rest_parameter(&rest_index);
+ BuildRestArgumentsArray(rest_parameter, rest_index);
+
+ // Build assignment to {.this_function} variable if it is used.
+ BuildThisFunctionVariable(scope->this_function_var());
+
+ // Build assignment to {new.target} variable if it is used.
+ BuildNewTargetVariable(scope->new_target_var());
+
// Emit tracing call if requested to do so.
if (FLAG_trace) {
NewNode(javascript()->CallRuntime(Runtime::kTraceEnter, 0));
}
- // Visit implicit declaration of the function name.
- if (scope->is_function_scope() && scope->function() != NULL) {
- VisitVariableDeclaration(scope->function());
+ // Visit illegal re-declaration and bail out if it exists.
+ if (scope->HasIllegalRedeclaration()) {
+ VisitForEffect(scope->GetIllegalRedeclaration());
+ return;
}
// Visit declarations within the function scope.
VisitDeclarations(scope->declarations());
// Build a stack-check before the body.
- Node* node = BuildStackCheck();
- PrepareFrameState(node, BailoutId::FunctionEntry());
+ if (stack_check) {
+ Node* node = NewNode(javascript()->StackCheck());
+ PrepareFrameState(node, BailoutId::FunctionEntry());
+ }
// Visit statements in the function body.
- VisitStatements(info()->function()->body());
- if (HasStackOverflow()) return false;
+ VisitStatements(info()->literal()->body());
// Emit tracing call if requested to do so.
if (FLAG_trace) {
@@ -115,60 +618,88 @@
}
// Return 'undefined' in case we can fall off the end.
- Node* control = NewNode(common()->Return(), jsgraph()->UndefinedConstant());
- UpdateControlDependencyToLeaveFunction(control);
-
- // Finish the basic structure of the graph.
- environment()->UpdateControlDependency(exit_control());
- graph()->SetEnd(NewNode(common()->End()));
-
- return true;
+ BuildReturn(jsgraph()->UndefinedConstant());
}
-// Left-hand side can only be a property, a global or a variable slot.
-enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+void AstGraphBuilder::ClearNonLiveSlotsInFrameStates() {
+ if (!FLAG_analyze_environment_liveness ||
+ !info()->is_deoptimization_enabled()) {
+ return;
+ }
-
-// Determine the left-hand side kind of an assignment.
-static LhsKind DetermineLhsKind(Expression* expr) {
- Property* property = expr->AsProperty();
- DCHECK(expr->IsValidReferenceExpression());
- LhsKind lhs_kind =
- (property == NULL) ? VARIABLE : (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- return lhs_kind;
+ NonLiveFrameStateSlotReplacer replacer(
+ &state_values_cache_, jsgraph()->UndefinedConstant(),
+ liveness_analyzer()->local_count(), local_zone());
+ Variable* arguments = info()->scope()->arguments();
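+ // A stack-allocated arguments variable stays permanently live, presumably
+ // so the deoptimizer can always materialize the arguments object from it.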
+ if (arguments != nullptr && arguments->IsStackAllocated()) {
+ replacer.MarkPermanentlyLive(arguments->index());
+ }
+ liveness_analyzer()->Run(&replacer);
+ if (FLAG_trace_environment_liveness) {
+ OFStream os(stdout);
+ liveness_analyzer()->Print(os);
+ }
}
-StructuredGraphBuilder::Environment* AstGraphBuilder::CopyEnvironment(
- StructuredGraphBuilder::Environment* env) {
- return new (zone()) Environment(*reinterpret_cast<Environment*>(env));
+// Gets the bailout id just before reading a variable proxy, but only for
+// unallocated variables.
+static BailoutId BeforeId(VariableProxy* proxy) {
+ return proxy->var()->IsUnallocatedOrGlobalSlot() ? proxy->BeforeId()
+ : BailoutId::None();
+}
+
+
+static const char* GetDebugParameterName(Zone* zone, Scope* scope, int index) {
+#if DEBUG
+ const AstRawString* name = scope->parameter(index)->raw_name();
+ if (name && name->length() > 0) {
+ char* data = zone->NewArray<char>(name->length() + 1);
+ data[name->length()] = 0;
+ memcpy(data, name->raw_data(), name->length());
+ return data;
+ }
+#endif
+ return nullptr;
}
AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
Scope* scope,
Node* control_dependency)
- : StructuredGraphBuilder::Environment(builder, control_dependency),
+ : builder_(builder),
parameters_count_(scope->num_parameters() + 1),
locals_count_(scope->num_stack_slots()),
- parameters_node_(NULL),
- locals_node_(NULL),
- stack_node_(NULL) {
+ liveness_block_(IsLivenessAnalysisEnabled()
+ ? builder_->liveness_analyzer()->NewBlock()
+ : nullptr),
+ values_(builder_->local_zone()),
+ contexts_(builder_->local_zone()),
+ control_dependency_(control_dependency),
+ effect_dependency_(control_dependency),
+ parameters_node_(nullptr),
+ locals_node_(nullptr),
+ stack_node_(nullptr) {
DCHECK_EQ(scope->num_parameters() + 1, parameters_count());
// Bind the receiver variable.
- Node* receiver = builder->graph()->NewNode(common()->Parameter(0),
- builder->graph()->start());
- values()->push_back(receiver);
+ int param_num = 0;
+ if (builder->info()->is_this_defined()) {
+ const Operator* op = common()->Parameter(param_num++, "%this");
+ Node* receiver = builder->graph()->NewNode(op, builder->graph()->start());
+ values()->push_back(receiver);
+ } else {
+ values()->push_back(builder->jsgraph()->UndefinedConstant());
+ }
// Bind all parameter variables. The parameter indices are shifted by 1
- // (receiver is parameter index -1 but environment index 0).
+ // (receiver is variable index -1 but {Parameter} node index 0 and located at
+ // index 0 in the environment).
for (int i = 0; i < scope->num_parameters(); ++i) {
- Node* parameter = builder->graph()->NewNode(common()->Parameter(i + 1),
- builder->graph()->start());
+ const char* debug_name = GetDebugParameterName(graph()->zone(), scope, i);
+ const Operator* op = common()->Parameter(param_num++, debug_name);
+ Node* parameter = builder->graph()->NewNode(op, builder->graph()->start());
values()->push_back(parameter);
}
@@ -178,21 +709,127 @@
}
-AstGraphBuilder::Environment::Environment(const Environment& copy)
- : StructuredGraphBuilder::Environment(
- static_cast<StructuredGraphBuilder::Environment>(copy)),
- parameters_count_(copy.parameters_count_),
- locals_count_(copy.locals_count_),
- parameters_node_(copy.parameters_node_),
- locals_node_(copy.locals_node_),
- stack_node_(copy.stack_node_) {}
+AstGraphBuilder::Environment::Environment(AstGraphBuilder::Environment* copy,
+ LivenessAnalyzerBlock* liveness_block)
+ : builder_(copy->builder_),
+ parameters_count_(copy->parameters_count_),
+ locals_count_(copy->locals_count_),
+ liveness_block_(liveness_block),
+ values_(copy->zone()),
+ contexts_(copy->zone()),
+ control_dependency_(copy->control_dependency_),
+ effect_dependency_(copy->effect_dependency_),
+ parameters_node_(copy->parameters_node_),
+ locals_node_(copy->locals_node_),
+ stack_node_(copy->stack_node_) {
+ const size_t kStackEstimate = 7; // optimum from experimentation!
+ values_.reserve(copy->values_.size() + kStackEstimate);
+ values_.insert(values_.begin(), copy->values_.begin(), copy->values_.end());
+ contexts_.reserve(copy->contexts_.size());
+ contexts_.insert(contexts_.begin(), copy->contexts_.begin(),
+ copy->contexts_.end());
+}
+
+
+void AstGraphBuilder::Environment::Bind(Variable* variable, Node* node) {
+ DCHECK(variable->IsStackAllocated());
+ if (variable->IsParameter()) {
+ // The parameter indices are shifted by 1 (receiver is variable
+ // index -1 but located at index 0 in the environment).
+ values()->at(variable->index() + 1) = node;
+ } else {
+ DCHECK(variable->IsStackLocal());
+ values()->at(variable->index() + parameters_count_) = node;
+ DCHECK(IsLivenessBlockConsistent());
+ if (liveness_block() != nullptr) {
+ liveness_block()->Bind(variable->index());
+ }
+ }
+}
+
+
+Node* AstGraphBuilder::Environment::Lookup(Variable* variable) {
+ DCHECK(variable->IsStackAllocated());
+ if (variable->IsParameter()) {
+ // The parameter indices are shifted by 1 (receiver is variable
+ // index -1 but located at index 0 in the environment).
+ return values()->at(variable->index() + 1);
+ } else {
+ DCHECK(variable->IsStackLocal());
+ DCHECK(IsLivenessBlockConsistent());
+ if (liveness_block() != nullptr) {
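+      // This call is made for its side effect only: it records a use of the
+      // variable in the liveness block. The value itself comes from values().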
+ liveness_block()->Lookup(variable->index());
+ }
+ return values()->at(variable->index() + parameters_count_);
+ }
+}
+
+
+void AstGraphBuilder::Environment::MarkAllLocalsLive() {
+ DCHECK(IsLivenessBlockConsistent());
+ if (liveness_block() != nullptr) {
+ for (int i = 0; i < locals_count_; i++) {
+ liveness_block()->Lookup(i);
+ }
+ }
+}
+
+
+void AstGraphBuilder::Environment::RawParameterBind(int index, Node* node) {
+ DCHECK_LT(index, parameters_count());
+ values()->at(index) = node;
+}
+
+
+Node* AstGraphBuilder::Environment::RawParameterLookup(int index) {
+ DCHECK_LT(index, parameters_count());
+ return values()->at(index);
+}
+
+
+AstGraphBuilder::Environment*
+AstGraphBuilder::Environment::CopyForConditional() {
+ LivenessAnalyzerBlock* copy_liveness_block = nullptr;
+ if (liveness_block() != nullptr) {
+ copy_liveness_block =
+ builder_->liveness_analyzer()->NewBlock(liveness_block());
+ liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
+ }
+ return new (zone()) Environment(this, copy_liveness_block);
+}
+
+
+AstGraphBuilder::Environment*
+AstGraphBuilder::Environment::CopyAsUnreachable() {
+ Environment* env = new (zone()) Environment(this, nullptr);
+ env->MarkAsUnreachable();
+ return env;
+}
+
+
+AstGraphBuilder::Environment*
+AstGraphBuilder::Environment::CopyAndShareLiveness() {
+ if (liveness_block() != nullptr) {
+ // Finish the current liveness block before copying.
+ liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
+ }
+ Environment* env = new (zone()) Environment(this, liveness_block());
+ return env;
+}
+
+
+AstGraphBuilder::Environment* AstGraphBuilder::Environment::CopyForLoop(
+ BitVector* assigned, bool is_osr) {
+ PrepareForLoop(assigned, is_osr);
+ return CopyAndShareLiveness();
+}
void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
int offset, int count) {
bool should_update = false;
- Node** env_values = (count == 0) ? NULL : &values()->at(offset);
- if (*state_values == NULL || (*state_values)->InputCount() != count) {
+ Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
+ if (*state_values == nullptr || (*state_values)->InputCount() != count) {
should_update = true;
} else {
DCHECK(static_cast<size_t>(offset + count) <= values()->size());
@@ -210,18 +847,50 @@
}
+void AstGraphBuilder::Environment::UpdateStateValuesWithCache(
+ Node** state_values, int offset, int count) {
+ Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
+ *state_values = builder_->state_values_cache_.GetNodeForValues(
+ env_values, static_cast<size_t>(count));
+}
+
+
Node* AstGraphBuilder::Environment::Checkpoint(
BailoutId ast_id, OutputFrameStateCombine combine) {
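+  // When deoptimization is disabled no meaningful frame state is required,
+  // so a single canonical empty frame state serves for all checkpoints.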
+ if (!builder()->info()->is_deoptimization_enabled()) {
+ return builder()->jsgraph()->EmptyFrameState();
+ }
+
UpdateStateValues(¶meters_node_, 0, parameters_count());
- UpdateStateValues(&locals_node_, parameters_count(), locals_count());
+ UpdateStateValuesWithCache(&locals_node_, parameters_count(), locals_count());
UpdateStateValues(&stack_node_, parameters_count() + locals_count(),
stack_height());
- const Operator* op = common()->FrameState(JS_FRAME, ast_id, combine);
+ const Operator* op = common()->FrameState(
+ ast_id, combine, builder()->frame_state_function_info());
- return graph()->NewNode(op, parameters_node_, locals_node_, stack_node_,
- GetContext(),
- builder()->jsgraph()->UndefinedConstant());
+ Node* result = graph()->NewNode(op, parameters_node_, locals_node_,
+ stack_node_, builder()->current_context(),
+ builder()->GetFunctionClosure(),
+ builder()->graph()->start());
+
+ DCHECK(IsLivenessBlockConsistent());
+ if (liveness_block() != nullptr) {
+ liveness_block()->Checkpoint(result);
+ }
+ return result;
+}
+
+
+bool AstGraphBuilder::Environment::IsLivenessAnalysisEnabled() {
+ return FLAG_analyze_environment_liveness &&
+ builder()->info()->is_deoptimization_enabled();
+}
+
+
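+// Invariant: a liveness block is tracked exactly when liveness analysis is
+// enabled and the environment is still reachable.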
+bool AstGraphBuilder::Environment::IsLivenessBlockConsistent() {
+ return (!IsLivenessAnalysisEnabled() || IsMarkedAsUnreachable()) ==
+ (liveness_block() == nullptr);
}
@@ -266,11 +935,11 @@
void AstGraphBuilder::AstTestContext::ProduceValue(Node* value) {
- environment()->Push(owner()->BuildToBoolean(value));
+ environment()->Push(owner()->BuildToBoolean(value, feedback_id_));
}
-Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return NULL; }
+Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return nullptr; }
Node* AstGraphBuilder::AstValueContext::ConsumeValue() {
@@ -283,36 +952,68 @@
}
-AstGraphBuilder::BreakableScope* AstGraphBuilder::BreakableScope::FindBreakable(
- BreakableStatement* target) {
- BreakableScope* current = this;
- while (current != NULL && current->target_ != target) {
- owner_->environment()->Drop(current->drop_extra_);
- current = current->next_;
+Scope* AstGraphBuilder::current_scope() const {
+ return execution_context_->scope();
+}
+
+
+Node* AstGraphBuilder::current_context() const {
+ return environment()->Context();
+}
+
+
+void AstGraphBuilder::ControlScope::PerformCommand(Command command,
+ Statement* target,
+ Node* value) {
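+  // Walk the chain of control scopes from innermost to outermost, trimming
+  // the operand stack and context chain to each scope's recorded height,
+  // until one scope handles the command (e.g. a loop handling CMD_BREAK).
+  // Afterwards the builder continues in an unreachable environment copy.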
+ Environment* env = environment()->CopyAsUnreachable();
+ ControlScope* current = this;
+ while (current != nullptr) {
+ environment()->TrimStack(current->stack_height());
+ environment()->TrimContextChain(current->context_length());
+ if (current->Execute(command, target, value)) break;
+ current = current->outer_;
}
- DCHECK(current != NULL); // Always found (unless stack is malformed).
- return current;
+ builder()->set_environment(env);
+ DCHECK_NOT_NULL(current); // Always handled (unless stack is malformed).
}
-void AstGraphBuilder::BreakableScope::BreakTarget(BreakableStatement* stmt) {
- FindBreakable(stmt)->control_->Break();
+void AstGraphBuilder::ControlScope::BreakTo(BreakableStatement* stmt) {
+ PerformCommand(CMD_BREAK, stmt, builder()->jsgraph()->TheHoleConstant());
}
-void AstGraphBuilder::BreakableScope::ContinueTarget(BreakableStatement* stmt) {
- FindBreakable(stmt)->control_->Continue();
+void AstGraphBuilder::ControlScope::ContinueTo(BreakableStatement* stmt) {
+ PerformCommand(CMD_CONTINUE, stmt, builder()->jsgraph()->TheHoleConstant());
+}
+
+
+void AstGraphBuilder::ControlScope::ReturnValue(Node* return_value) {
+ PerformCommand(CMD_RETURN, nullptr, return_value);
+}
+
+
+void AstGraphBuilder::ControlScope::ThrowValue(Node* exception_value) {
+ PerformCommand(CMD_THROW, nullptr, exception_value);
}
void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
- if (expr == NULL) {
+ if (expr == nullptr) {
return environment()->Push(jsgraph()->NullConstant());
}
VisitForValue(expr);
}
+void AstGraphBuilder::VisitForValueOrTheHole(Expression* expr) {
+ if (expr == nullptr) {
+ return environment()->Push(jsgraph()->TheHoleConstant());
+ }
+ VisitForValue(expr);
+}
+
+
void AstGraphBuilder::VisitForValues(ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
VisitForValue(exprs->at(i));
@@ -341,7 +1042,7 @@
void AstGraphBuilder::VisitForTest(Expression* expr) {
- AstTestContext for_condition(this);
+ AstTestContext for_condition(this, expr->test_id());
if (!CheckStackOverflow()) {
expr->Accept(this);
} else {
@@ -365,7 +1066,8 @@
VariableMode mode = decl->mode();
bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
Handle<Oddball> value = variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value();
@@ -373,21 +1075,21 @@
globals()->push_back(value);
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
if (hole_init) {
Node* value = jsgraph()->TheHoleConstant();
environment()->Bind(variable, value);
}
break;
- case Variable::CONTEXT:
+ case VariableLocation::CONTEXT:
if (hole_init) {
Node* value = jsgraph()->TheHoleConstant();
const Operator* op = javascript()->StoreContext(0, variable->index());
NewNode(op, current_context(), value);
}
break;
- case Variable::LOOKUP:
+ case VariableLocation::LOOKUP:
UNIMPLEMENTED();
}
}
@@ -396,40 +1098,36 @@
void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
Variable* variable = decl->proxy()->var();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(decl->fun(), info()->script(), info());
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
+ decl->fun(), info()->script(), info());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals()->push_back(variable->name());
globals()->push_back(function);
break;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
VisitForValue(decl->fun());
Node* value = environment()->Pop();
environment()->Bind(variable, value);
break;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
VisitForValue(decl->fun());
Node* value = environment()->Pop();
const Operator* op = javascript()->StoreContext(0, variable->index());
NewNode(op, current_context(), value);
break;
}
- case Variable::LOOKUP:
+ case VariableLocation::LOOKUP:
UNIMPLEMENTED();
}
}
-void AstGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* decl) {
- UNREACHABLE();
-}
-
-
void AstGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) {
UNREACHABLE();
}
@@ -440,43 +1138,26 @@
}
-void AstGraphBuilder::VisitModuleLiteral(ModuleLiteral* modl) { UNREACHABLE(); }
-
-
-void AstGraphBuilder::VisitModuleVariable(ModuleVariable* modl) {
- UNREACHABLE();
-}
-
-
-void AstGraphBuilder::VisitModulePath(ModulePath* modl) { UNREACHABLE(); }
-
-
-void AstGraphBuilder::VisitModuleUrl(ModuleUrl* modl) { UNREACHABLE(); }
-
-
void AstGraphBuilder::VisitBlock(Block* stmt) {
BlockBuilder block(this);
- BreakableScope scope(this, stmt, &block, 0);
- if (stmt->labels() != NULL) block.BeginBlock();
- if (stmt->scope() == NULL) {
+ ControlScopeForBreakable scope(this, stmt, &block);
+ if (stmt->labels() != nullptr) block.BeginBlock();
+ if (stmt->scope() == nullptr) {
// Visit statements in the same scope, no declarations.
VisitStatements(stmt->statements());
} else {
- const Operator* op = javascript()->CreateBlockContext();
- Node* scope_info = jsgraph()->Constant(stmt->scope()->GetScopeInfo());
- Node* context = NewNode(op, scope_info, GetFunctionClosure());
- ContextScope scope(this, stmt->scope(), context);
-
// Visit declarations and statements in a block scope.
- VisitDeclarations(stmt->scope()->declarations());
- VisitStatements(stmt->statements());
+ if (stmt->scope()->NeedsContext()) {
+ Node* context = BuildLocalBlockContext(stmt->scope());
+ ContextScope scope(this, stmt->scope(), context);
+ VisitDeclarations(stmt->scope()->declarations());
+ VisitStatements(stmt->statements());
+ } else {
+ VisitDeclarations(stmt->scope()->declarations());
+ VisitStatements(stmt->statements());
+ }
}
- if (stmt->labels() != NULL) block.EndBlock();
-}
-
-
-void AstGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
- UNREACHABLE();
+ if (stmt->labels() != nullptr) block.EndBlock();
}
@@ -490,6 +1171,12 @@
}
+void AstGraphBuilder::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* stmt) {
+ Visit(stmt->statement());
+}
+
+
void AstGraphBuilder::VisitIfStatement(IfStatement* stmt) {
IfBuilder compare_if(this);
VisitForTest(stmt->condition());
@@ -504,47 +1191,42 @@
void AstGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
- StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
- breakable()->ContinueTarget(stmt->target());
- set_environment(env);
+ execution_control()->ContinueTo(stmt->target());
}
void AstGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
- StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
- breakable()->BreakTarget(stmt->target());
- set_environment(env);
+ execution_control()->BreakTo(stmt->target());
}
void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
VisitForValue(stmt->expression());
Node* result = environment()->Pop();
- Node* control = NewNode(common()->Return(), result);
- UpdateControlDependencyToLeaveFunction(control);
+ execution_control()->ReturnValue(result);
}
void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
VisitForValue(stmt->expression());
Node* value = environment()->Pop();
+ Node* object = BuildToObject(value, stmt->ToObjectId());
const Operator* op = javascript()->CreateWithContext();
- Node* context = NewNode(op, value, GetFunctionClosure());
- ContextScope scope(this, stmt->scope(), context);
- Visit(stmt->statement());
+ Node* context = NewNode(op, object, GetFunctionClosureForContext());
+ PrepareFrameState(context, stmt->EntryId());
+ VisitInScope(stmt->statement(), stmt->scope(), context);
}
void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
ZoneList<CaseClause*>* clauses = stmt->cases();
SwitchBuilder compare_switch(this, clauses->length());
- BreakableScope scope(this, stmt, &compare_switch, 0);
+ ControlScopeForBreakable scope(this, stmt, &compare_switch);
compare_switch.BeginSwitch();
int default_index = -1;
// Keep the switch value on the stack until a case matches.
VisitForValue(stmt->tag());
- Node* tag = environment()->Top();
// Iterate over all cases and create nodes for label comparison.
for (int i = 0; i < clauses->length(); i++) {
@@ -560,6 +1242,7 @@
// value is still on the operand stack while the label is evaluated.
VisitForValue(clause->label());
Node* label = environment()->Pop();
+ Node* tag = environment()->Top();
const Operator* op = javascript()->StrictEqual();
Node* condition = NewNode(op, tag, label);
compare_switch.BeginLabel(i, condition);
@@ -589,8 +1272,8 @@
void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder while_loop(this);
- while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
- VisitIterationBody(stmt, &while_loop, 0);
+ while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
+ VisitIterationBody(stmt, &while_loop);
while_loop.EndBody();
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
@@ -601,11 +1284,11 @@
void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
LoopBuilder while_loop(this);
- while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
+ while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
while_loop.BreakUnless(condition);
- VisitIterationBody(stmt, &while_loop, 0);
+ VisitIterationBody(stmt, &while_loop);
while_loop.EndBody();
while_loop.EndLoop();
}
@@ -614,219 +1297,368 @@
void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
LoopBuilder for_loop(this);
VisitIfNotNull(stmt->init());
- for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
- if (stmt->cond() != NULL) {
+ for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
+ if (stmt->cond() != nullptr) {
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
for_loop.BreakUnless(condition);
} else {
for_loop.BreakUnless(jsgraph()->TrueConstant());
}
- VisitIterationBody(stmt, &for_loop, 0);
+ VisitIterationBody(stmt, &for_loop);
for_loop.EndBody();
VisitIfNotNull(stmt->next());
for_loop.EndLoop();
}
-// TODO(dcarney): this is a big function. Try to clean up some.
void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
VisitForValue(stmt->subject());
- Node* obj = environment()->Pop();
- // Check for undefined or null before entering loop.
- IfBuilder is_undefined(this);
- Node* is_undefined_cond =
- NewNode(javascript()->StrictEqual(), obj, jsgraph()->UndefinedConstant());
- is_undefined.If(is_undefined_cond);
- is_undefined.Then();
- is_undefined.Else();
+ Node* object = environment()->Pop();
+ BlockBuilder for_block(this);
+ for_block.BeginBlock();
+ // Check for null or undefined before entering the loop.
+ Node* is_null_cond =
+ NewNode(javascript()->StrictEqual(), object, jsgraph()->NullConstant());
+ for_block.BreakWhen(is_null_cond, BranchHint::kFalse);
+ Node* is_undefined_cond = NewNode(javascript()->StrictEqual(), object,
+ jsgraph()->UndefinedConstant());
+ for_block.BreakWhen(is_undefined_cond, BranchHint::kFalse);
{
- IfBuilder is_null(this);
- Node* is_null_cond =
- NewNode(javascript()->StrictEqual(), obj, jsgraph()->NullConstant());
- is_null.If(is_null_cond);
- is_null.Then();
- is_null.Else();
// Convert object to jsobject.
- // PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
- obj = NewNode(javascript()->ToObject(), obj);
- PrepareFrameState(obj, stmt->ToObjectId(), OutputFrameStateCombine::Push());
- environment()->Push(obj);
- // TODO(dcarney): should do a fast enum cache check here to skip runtime.
- environment()->Push(obj);
- Node* cache_type = ProcessArguments(
- javascript()->CallRuntime(Runtime::kGetPropertyNamesFast, 1), 1);
- PrepareFrameState(cache_type, stmt->EnumId(),
- OutputFrameStateCombine::Push());
- // TODO(dcarney): these next runtime calls should be removed in favour of
- // a few simplified instructions.
- environment()->Push(obj);
- environment()->Push(cache_type);
- Node* cache_pair =
- ProcessArguments(javascript()->CallRuntime(Runtime::kForInInit, 2), 2);
- // cache_type may have been replaced.
- Node* cache_array = NewNode(common()->Projection(0), cache_pair);
- cache_type = NewNode(common()->Projection(1), cache_pair);
+ object = BuildToObject(object, stmt->ToObjectId());
+ environment()->Push(object);
+
+ // Prepare for-in cache.
+ Node* prepare = NewNode(javascript()->ForInPrepare(), object);
+ PrepareFrameState(prepare, stmt->EnumId(), OutputFrameStateCombine::Push());
+ Node* cache_type = NewNode(common()->Projection(0), prepare);
+ Node* cache_array = NewNode(common()->Projection(1), prepare);
+ Node* cache_length = NewNode(common()->Projection(2), prepare);
+
+ // Construct the rest of the environment.
environment()->Push(cache_type);
environment()->Push(cache_array);
- Node* cache_length = ProcessArguments(
- javascript()->CallRuntime(Runtime::kForInCacheArrayLength, 2), 2);
+ environment()->Push(cache_length);
+ environment()->Push(jsgraph()->ZeroConstant());
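+
+  // Operand stack layout from here on, from top to bottom:
+  //   index, cache_length, cache_array, cache_type, object.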
+
+ // Build the actual loop body.
+ LoopBuilder for_loop(this);
+ for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
{
- // TODO(dcarney): this check is actually supposed to be for the
- // empty enum case only.
- IfBuilder have_no_properties(this);
- Node* empty_array_cond = NewNode(javascript()->StrictEqual(),
- cache_length, jsgraph()->ZeroConstant());
- have_no_properties.If(empty_array_cond);
- have_no_properties.Then();
- // Pop obj and skip loop.
- environment()->Pop();
- have_no_properties.Else();
+ // These stack values are renamed in the case of OSR, so reload them
+ // from the environment.
+ Node* index = environment()->Peek(0);
+ Node* cache_length = environment()->Peek(1);
+ Node* cache_array = environment()->Peek(2);
+ Node* cache_type = environment()->Peek(3);
+ Node* object = environment()->Peek(4);
+
+ // Check loop termination condition.
+ Node* exit_cond = NewNode(javascript()->ForInDone(), index, cache_length);
+ for_loop.BreakWhen(exit_cond);
+
+ // Compute the next enumerated value.
+ Node* value = NewNode(javascript()->ForInNext(), object, cache_array,
+ cache_type, index);
+ PrepareFrameState(value, stmt->FilterId(),
+ OutputFrameStateCombine::Push());
+ IfBuilder test_value(this);
+ Node* test_value_cond = NewNode(javascript()->StrictEqual(), value,
+ jsgraph()->UndefinedConstant());
+ test_value.If(test_value_cond, BranchHint::kFalse);
+ test_value.Then();
+ test_value.Else();
{
- // Construct the rest of the environment.
- environment()->Push(cache_type);
- environment()->Push(cache_array);
- environment()->Push(cache_length);
- environment()->Push(jsgraph()->ZeroConstant());
- // PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- LoopBuilder for_loop(this);
- for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
- // Check loop termination condition.
- Node* index = environment()->Peek(0);
- Node* exit_cond =
- NewNode(javascript()->LessThan(), index, cache_length);
- // TODO(jarin): provide real bailout id.
- PrepareFrameState(exit_cond, BailoutId::None());
- for_loop.BreakUnless(exit_cond);
- // TODO(dcarney): this runtime call should be a handful of
- // simplified instructions that
- // basically produce
- // value = array[index]
- environment()->Push(obj);
- environment()->Push(cache_array);
- environment()->Push(cache_type);
- environment()->Push(index);
- Node* pair = ProcessArguments(
- javascript()->CallRuntime(Runtime::kForInNext, 4), 4);
- Node* value = NewNode(common()->Projection(0), pair);
- Node* should_filter = NewNode(common()->Projection(1), pair);
- environment()->Push(value);
- {
- // Test if FILTER_KEY needs to be called.
- IfBuilder test_should_filter(this);
- Node* should_filter_cond =
- NewNode(javascript()->StrictEqual(), should_filter,
- jsgraph()->TrueConstant());
- test_should_filter.If(should_filter_cond);
- test_should_filter.Then();
- value = environment()->Pop();
- Node* builtins = BuildLoadBuiltinsObject();
- Node* function = BuildLoadObjectField(
- builtins,
- JSBuiltinsObject::OffsetOfFunctionWithId(Builtins::FILTER_KEY));
- // Callee.
- environment()->Push(function);
- // Receiver.
- environment()->Push(obj);
- // Args.
- environment()->Push(value);
- // result is either the string key or Smi(0) indicating the property
- // is gone.
- Node* res = ProcessArguments(
- javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS), 3);
- // TODO(jarin): provide real bailout id.
- PrepareFrameState(res, BailoutId::None());
- Node* property_missing = NewNode(javascript()->StrictEqual(), res,
- jsgraph()->ZeroConstant());
- {
- IfBuilder is_property_missing(this);
- is_property_missing.If(property_missing);
- is_property_missing.Then();
- // Inc counter and continue.
- Node* index_inc =
- NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
- // TODO(jarin): provide real bailout id.
- PrepareFrameState(index_inc, BailoutId::None());
- environment()->Poke(0, index_inc);
- for_loop.Continue();
- is_property_missing.Else();
- is_property_missing.End();
- }
- // Replace 'value' in environment.
- environment()->Push(res);
- test_should_filter.Else();
- test_should_filter.End();
- }
- value = environment()->Pop();
// Bind value and do loop body.
- VisitForInAssignment(stmt->each(), value);
- VisitIterationBody(stmt, &for_loop, 5);
- for_loop.EndBody();
- // Inc counter and continue.
- Node* index_inc =
- NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
- // TODO(jarin): provide real bailout id.
- PrepareFrameState(index_inc, BailoutId::None());
- environment()->Poke(0, index_inc);
- for_loop.EndLoop();
- environment()->Drop(5);
- // PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(stmt->EachFeedbackSlot());
+ VisitForInAssignment(stmt->each(), value, feedback, stmt->FilterId(),
+ stmt->AssignmentId());
+ VisitIterationBody(stmt, &for_loop);
}
- have_no_properties.End();
+ test_value.End();
+ index = environment()->Peek(0);
+ for_loop.EndBody();
+
+ // Increment counter and continue.
+ index = NewNode(javascript()->ForInStep(), index);
+ environment()->Poke(0, index);
}
- is_null.End();
+ for_loop.EndLoop();
+ environment()->Drop(5);
}
- is_undefined.End();
+ for_block.EndBlock();
}
void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
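+  // The parser has already desugared for-of into its constituent expressions
+  // (assign_iterator, next_result, result_done, assign_each); here they are
+  // only stitched into a loop. An illustrative (assumed, not exact) shape:
+  //   iterator = subject[Symbol.iterator]();  // assign_iterator
+  //   loop: result = iterator.next();         // next_result
+  //         if (result.done) break;           // result_done
+  //         each = result.value;              // assign_each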
- VisitForValue(stmt->subject());
- environment()->Pop();
- // TODO(turbofan): create and use loop builder.
+ LoopBuilder for_loop(this);
+ VisitForEffect(stmt->assign_iterator());
+ for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
+ VisitForEffect(stmt->next_result());
+ VisitForTest(stmt->result_done());
+ Node* condition = environment()->Pop();
+ for_loop.BreakWhen(condition);
+ VisitForEffect(stmt->assign_each());
+ VisitIterationBody(stmt, &for_loop);
+ for_loop.EndBody();
+ for_loop.EndLoop();
}
void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
- UNREACHABLE();
+ TryCatchBuilder try_control(this);
+
+ // Evaluate the try-block inside a control scope. This simulates a handler
+ // that is intercepting 'throw' control commands.
+ try_control.BeginTry();
+ {
+ ControlScopeForCatch scope(this, &try_control);
+ STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
+ environment()->Push(current_context());
+ Visit(stmt->try_block());
+ environment()->Pop();
+ }
+ try_control.EndTry();
+
+ // Insert lazy bailout point.
+ // TODO(mstarzinger): We are only using a 'call' to get a lazy bailout
+ // point. Ideally, we would not re-enter optimized code when deoptimized
+ // lazily. Tracked by issue v8:4195.
+ NewNode(common()->LazyBailout(),
+ jsgraph()->ZeroConstant(), // dummy target.
+ environment()->Checkpoint(stmt->HandlerId())); // frame state.
+
+ // Clear message object as we enter the catch block.
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ NewNode(javascript()->StoreMessage(), the_hole);
+
+ // Create a catch scope that binds the exception.
+ Node* exception = try_control.GetExceptionNode();
+ Handle<String> name = stmt->variable()->name();
+ const Operator* op = javascript()->CreateCatchContext(name);
+ Node* context = NewNode(op, exception, GetFunctionClosureForContext());
+
+ // Evaluate the catch-block.
+ VisitInScope(stmt->catch_block(), stmt->scope(), context);
+ try_control.EndCatch();
}
void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- UNREACHABLE();
+ TryFinallyBuilder try_control(this);
+
+ // We keep a record of all paths that enter the finally-block to be able to
+ // dispatch to the correct continuation point after the statements in the
+ // finally-block have been evaluated.
+ //
+ // The try-finally construct can enter the finally-block in three ways:
+ // 1. By exiting the try-block normally, falling through at the end.
+ // 2. By exiting the try-block with a function-local control flow transfer
+ // (i.e. through break/continue/return statements).
+ // 3. By exiting the try-block with a thrown exception.
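+  //
+  // An illustrative (non-normative) example of the three paths:
+  //   try {
+  //     if (a) break;    // (2) local control flow transfer
+  //     if (b) throw c;  // (3) thrown exception
+  //   } finally { ... }  // (1) reached by falling through otherwise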
+ Node* fallthrough_result = jsgraph()->TheHoleConstant();
+ ControlScope::DeferredCommands* commands =
+ new (local_zone()) ControlScope::DeferredCommands(this);
+
+ // Evaluate the try-block inside a control scope. This simulates a handler
+ // that is intercepting all control commands.
+ try_control.BeginTry();
+ {
+ ControlScopeForFinally scope(this, commands, &try_control);
+ STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
+ environment()->Push(current_context());
+ Visit(stmt->try_block());
+ environment()->Pop();
+ }
+ try_control.EndTry(commands->GetFallThroughToken(), fallthrough_result);
+
+ // Insert lazy bailout point.
+ // TODO(mstarzinger): We are only using a 'call' to get a lazy bailout
+ // point. Ideally, we would not re-enter optimized code when deoptimized
+ // lazily. Tracked by issue v8:4195.
+ NewNode(common()->LazyBailout(),
+ jsgraph()->ZeroConstant(), // dummy target.
+ environment()->Checkpoint(stmt->HandlerId())); // frame state.
+
+ // The result value semantics depend on how the block was entered:
+ // - ReturnStatement: It represents the return value being returned.
+ // - ThrowStatement: It represents the exception being thrown.
+ // - BreakStatement/ContinueStatement: Filled with the hole.
+ // - Falling through into finally-block: Filled with the hole.
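+  // For example (illustrative only), entering via 'return x' dispatches with
+  // the token recorded for that command and the result bound to x, whereas
+  // falling through uses the fall-through token and the hole as result.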
+ Node* result = try_control.GetResultValueNode();
+ Node* token = try_control.GetDispatchTokenNode();
+
+ // The result value, dispatch token and message are expected on the operand
+ // stack (this is in sync with FullCodeGenerator::EnterFinallyBlock).
+ Node* message = NewNode(javascript()->LoadMessage());
+ environment()->Push(token); // TODO(mstarzinger): Cook token!
+ environment()->Push(result);
+ environment()->Push(message);
+
+ // Clear message object as we enter the finally block.
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ NewNode(javascript()->StoreMessage(), the_hole);
+
+ // Evaluate the finally-block.
+ Visit(stmt->finally_block());
+ try_control.EndFinally();
+
+ // The result value, dispatch token and message are restored from the operand
+ // stack (this is in sync with FullCodeGenerator::ExitFinallyBlock).
+ message = environment()->Pop();
+ result = environment()->Pop();
+ token = environment()->Pop(); // TODO(mstarzinger): Uncook token!
+ NewNode(javascript()->StoreMessage(), message);
+
+ // Dynamic dispatch after the finally-block.
+ commands->ApplyDeferredCommands(token, result);
+
+ // TODO(mstarzinger): Remove bailout once everything works.
+ if (!FLAG_turbo_try_finally) SetStackOverflow();
}
void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- // TODO(turbofan): Do we really need a separate reloc-info for this?
- Node* node = NewNode(javascript()->CallRuntime(Runtime::kDebugBreak, 0));
+ Node* node =
+ NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement, 0));
PrepareFrameState(node, stmt->DebugBreakId());
+ environment()->MarkAllLocalsLive();
}
void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- Node* context = current_context();
-
- // Build a new shared function info if we cannot find one in the baseline
- // code. We also have a stack overflow if the recursive compilation did.
- expr->InitializeSharedInfo(handle(info()->shared_info()->code()));
- Handle<SharedFunctionInfo> shared_info = expr->shared_info();
- if (shared_info.is_null()) {
- shared_info = Compiler::BuildFunctionInfo(expr, info()->script(), info());
- CHECK(!shared_info.is_null()); // TODO(mstarzinger): Set stack overflow?
- }
+ // Find or build a shared function info.
+ Handle<SharedFunctionInfo> shared_info =
+ Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
+ CHECK(!shared_info.is_null()); // TODO(mstarzinger): Set stack overflow?
// Create node to instantiate a new closure.
- Node* info = jsgraph()->Constant(shared_info);
- Node* pretenure = jsgraph()->BooleanConstant(expr->pretenure());
- const Operator* op = javascript()->CallRuntime(Runtime::kNewClosure, 3);
- Node* value = NewNode(op, context, info, pretenure);
+ PretenureFlag pretenure = expr->pretenure() ? TENURED : NOT_TENURED;
+ const Operator* op = javascript()->CreateClosure(shared_info, pretenure);
+ Node* value = NewNode(op);
ast_context()->ProduceValue(value);
}
void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
- UNREACHABLE();
+ // Visit declarations and class literal in a block scope.
+ if (expr->scope()->ContextLocalCount() > 0) {
+ Node* context = BuildLocalBlockContext(expr->scope());
+ ContextScope scope(this, expr->scope(), context);
+ VisitDeclarations(expr->scope()->declarations());
+ VisitClassLiteralContents(expr);
+ } else {
+ VisitDeclarations(expr->scope()->declarations());
+ VisitClassLiteralContents(expr);
+ }
+}
+
+
+void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
+ Node* class_name = expr->raw_name() ? jsgraph()->Constant(expr->name())
+ : jsgraph()->UndefinedConstant();
+
+ // The class name is expected on the operand stack.
+ environment()->Push(class_name);
+ VisitForValueOrTheHole(expr->extends());
+ VisitForValue(expr->constructor());
+
+ // Create node to instantiate a new class.
+ Node* constructor = environment()->Pop();
+ Node* extends = environment()->Pop();
+ Node* name = environment()->Pop();
+ Node* start = jsgraph()->Constant(expr->start_position());
+ Node* end = jsgraph()->Constant(expr->end_position());
+ const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass, 5);
+ Node* literal = NewNode(opc, name, extends, constructor, start, end);
+ PrepareFrameState(literal, expr->CreateLiteralId(),
+ OutputFrameStateCombine::Push());
+
+ // The prototype is ensured to exist by Runtime_DefineClass. No access check
+ // is needed here since the constructor is created by the class literal.
+ Node* prototype =
+ BuildLoadObjectField(literal, JSFunction::kPrototypeOrInitialMapOffset);
+
+ // The class literal and the prototype are both expected on the operand stack
+ // during evaluation of the method values.
+ environment()->Push(literal);
+ environment()->Push(prototype);
+
+ // Create nodes to store method values into the literal.
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ environment()->Push(environment()->Peek(property->is_static() ? 1 : 0));
+
+ VisitForValue(property->key());
+ Node* name = BuildToName(environment()->Pop(), expr->GetIdForProperty(i));
+ environment()->Push(name);
+
+ // The static prototype property is read-only. We handle the non-computed
+ // property name case in the parser. Since this is the only case where we
+ // need to check for an own read-only property, we special-case it here
+ // rather than performing the check for every property.
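+      // For example (illustrative only):
+      //   class C { static ["proto" + "type"]() {} }
+      // must throw, because it would redefine the read-only 'prototype'.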
+ if (property->is_static() && property->is_computed_name()) {
+ Node* check = BuildThrowIfStaticPrototype(environment()->Pop(),
+ expr->GetIdForProperty(i));
+ environment()->Push(check);
+ }
+
+ VisitForValue(property->value());
+ Node* value = environment()->Pop();
+ Node* key = environment()->Pop();
+ Node* receiver = environment()->Pop();
+
+ BuildSetHomeObject(value, receiver, property);
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ case ObjectLiteral::Property::COMPUTED: {
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kDefineClassMethod, 3);
+ NewNode(op, receiver, key, value);
+ break;
+ }
+ case ObjectLiteral::Property::GETTER: {
+ Node* attr = jsgraph()->Constant(DONT_ENUM);
+ const Operator* op = javascript()->CallRuntime(
+ Runtime::kDefineGetterPropertyUnchecked, 4);
+ NewNode(op, receiver, key, value, attr);
+ break;
+ }
+ case ObjectLiteral::Property::SETTER: {
+ Node* attr = jsgraph()->Constant(DONT_ENUM);
+ const Operator* op = javascript()->CallRuntime(
+ Runtime::kDefineSetterPropertyUnchecked, 4);
+ NewNode(op, receiver, key, value, attr);
+ break;
+ }
+ }
+ }
+
+ // Set both the prototype and constructor to have fast properties, and also
+ // freeze them in strong mode.
+ prototype = environment()->Pop();
+ literal = environment()->Pop();
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ literal = NewNode(op, literal, prototype);
+
+ // Assign to class variable.
+ if (expr->class_variable_proxy() != nullptr) {
+ Variable* var = expr->class_variable_proxy()->var();
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ VectorSlotPair feedback = CreateVectorSlotPair(
+ expr->NeedsProxySlot() ? expr->ProxySlot()
+ : FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(var, literal, Token::INIT, feedback,
+ BailoutId::None(), states);
+ }
+ ast_context()->ProduceValue(literal);
}
@@ -835,6 +1667,13 @@
}
+void AstGraphBuilder::VisitDoExpression(DoExpression* expr) {
+ VisitBlock(expr->block());
+ VisitVariableProxy(expr->result());
+ ast_context()->ReplaceValue();
+}
+
+
void AstGraphBuilder::VisitConditional(Conditional* expr) {
IfBuilder compare_if(this);
VisitForTest(expr->condition());
@@ -851,7 +1690,9 @@
void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
VectorSlotPair pair = CreateVectorSlotPair(expr->VariableFeedbackSlot());
- Node* value = BuildVariableLoad(expr->var(), expr->id(), pair);
+ FrameStateBeforeAndAfter states(this, BeforeId(expr));
+ Node* value = BuildVariableLoad(expr->var(), expr->id(), states, pair,
+ ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
@@ -866,14 +1707,9 @@
Node* closure = GetFunctionClosure();
// Create node to materialize a regular expression literal.
- Node* literals_array =
- BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* literal_index = jsgraph()->Constant(expr->literal_index());
- Node* pattern = jsgraph()->Constant(expr->pattern());
- Node* flags = jsgraph()->Constant(expr->flags());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- Node* literal = NewNode(op, literals_array, literal_index, pattern, flags);
+ const Operator* op = javascript()->CreateLiteralRegExp(
+ expr->pattern(), expr->flags(), expr->literal_index());
+ Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(literal);
}
@@ -883,15 +1719,10 @@
Node* closure = GetFunctionClosure();
// Create node to deep-copy the literal boilerplate.
- expr->BuildConstantProperties(isolate());
- Node* literals_array =
- BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* literal_index = jsgraph()->Constant(expr->literal_index());
- Node* constants = jsgraph()->Constant(expr->constant_properties());
- Node* flags = jsgraph()->Constant(expr->ComputeFlags());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kCreateObjectLiteral, 4);
- Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+ const Operator* op = javascript()->CreateLiteralObject(
+ expr->constant_properties(), expr->ComputeFlags(true),
+ expr->literal_index());
+ Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
OutputFrameStateCombine::Push());
@@ -899,18 +1730,15 @@
// property values and is the value of the entire expression.
environment()->Push(literal);
- // Mark all computed expressions that are bound to a key that is shadowed by
- // a later occurrence of the same key. For the marked expressions, no store
- // code is emitted.
- expr->CalculateEmitStore(zone());
-
// Create nodes to store computed values into the literal.
- AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
+ int property_index = 0;
+ AccessorTable accessor_table(local_zone());
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
- Literal* key = property->key();
+ Literal* key = property->key()->AsLiteral();
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
@@ -923,61 +1751,73 @@
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForValue(property->value());
+ FrameStateBeforeAndAfter states(this, property->value()->id());
Node* value = environment()->Pop();
- Unique<Name> name = MakeUnique(key->AsPropertyName());
- Node* store = NewNode(javascript()->StoreNamed(strict_mode(), name),
- literal, value);
- PrepareFrameState(store, key->id());
+ Node* literal = environment()->Top();
+ Handle<Name> name = key->AsPropertyName();
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(property->GetSlot(0));
+ Node* store = BuildNamedStore(literal, name, value, feedback);
+ states.AddToNode(store, key->id(),
+ OutputFrameStateCombine::Ignore());
+ BuildSetHomeObject(value, literal, property, 1);
} else {
VisitForEffect(property->value());
}
break;
}
- environment()->Push(literal); // Duplicate receiver.
+ environment()->Push(environment()->Top()); // Duplicate receiver.
VisitForValue(property->key());
VisitForValue(property->value());
Node* value = environment()->Pop();
Node* key = environment()->Pop();
Node* receiver = environment()->Pop();
if (property->emit_store()) {
- Node* strict = jsgraph()->Constant(SLOPPY);
+ Node* language = jsgraph()->Constant(SLOPPY);
const Operator* op =
javascript()->CallRuntime(Runtime::kSetProperty, 4);
- NewNode(op, receiver, key, value, strict);
+ Node* set_property = NewNode(op, receiver, key, value, language);
+ // SetProperty should not lazy deopt on an object literal.
+ PrepareFrameState(set_property, BailoutId::None());
+ BuildSetHomeObject(value, receiver, property);
}
break;
}
case ObjectLiteral::Property::PROTOTYPE: {
- environment()->Push(literal); // Duplicate receiver.
+ environment()->Push(environment()->Top()); // Duplicate receiver.
VisitForValue(property->value());
Node* value = environment()->Pop();
Node* receiver = environment()->Pop();
- if (property->emit_store()) {
- const Operator* op =
- javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
- Node* set_prototype = NewNode(op, receiver, value);
- // SetPrototype should not lazy deopt on an object
- // literal.
- PrepareFrameState(set_prototype, BailoutId::None());
- }
+ DCHECK(property->emit_store());
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
+ Node* set_prototype = NewNode(op, receiver, value);
+ // SetPrototype should not lazy deopt on an object literal.
+ PrepareFrameState(set_prototype,
+ expr->GetIdForPropertySet(property_index));
break;
}
case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = property->value();
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->getter = property;
+ }
break;
case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = property->value();
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->setter = property;
+ }
break;
}
}
// Create nodes to define accessors, using only a single call to the runtime
// for each pair of corresponding getters and setters.
+ literal = environment()->Top(); // Reload from operand stack.
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end(); ++it) {
VisitForValue(it->first);
- VisitForValueOrNull(it->second->getter);
- VisitForValueOrNull(it->second->setter);
+ VisitObjectLiteralAccessor(literal, it->second->getter);
+ VisitObjectLiteralAccessor(literal, it->second->setter);
Node* setter = environment()->Pop();
Node* getter = environment()->Pop();
Node* name = environment()->Pop();
@@ -989,7 +1829,75 @@
PrepareFrameState(call, BailoutId::None());
}
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // Runtime_CreateObjectLiteralBoilerplate. The second "dynamic" part starts
+ // with the first computed property name and continues with all properties to
+ // its right. All the code from above initializes the static component of the
+ // object literal, and arranges for the map of the result to reflect the
+ // static order in which the keys appear. For the dynamic properties, we
+ // compile them into a series of "SetOwnProperty" runtime calls. This will
+ // preserve insertion order.
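+  // For example (illustrative only), in { a: 1, [k]: 2, b: 3 } the property
+  // 'a' belongs to the static part, while '[k]' and 'b' belong to the dynamic
+  // part and are defined by the runtime calls below.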
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ environment()->Push(environment()->Top()); // Duplicate receiver.
+ VisitForValue(property->value());
+ Node* value = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
+ Node* call = NewNode(op, receiver, value);
+ PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
+ continue;
+ }
+
+ environment()->Push(environment()->Top()); // Duplicate receiver.
+ VisitForValue(property->key());
+ Node* name = BuildToName(environment()->Pop(),
+ expr->GetIdForPropertyName(property_index));
+ environment()->Push(name);
+ VisitForValue(property->value());
+ Node* value = environment()->Pop();
+ Node* key = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ BuildSetHomeObject(value, receiver, property);
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
+ Node* attr = jsgraph()->Constant(NONE);
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ Node* call = NewNode(op, receiver, key, value, attr);
+ PrepareFrameState(call, BailoutId::None());
+ break;
+ }
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE(); // Handled specially above.
+ break;
+ case ObjectLiteral::Property::GETTER: {
+ Node* attr = jsgraph()->Constant(NONE);
+ const Operator* op = javascript()->CallRuntime(
+ Runtime::kDefineGetterPropertyUnchecked, 4);
+ Node* call = NewNode(op, receiver, key, value, attr);
+ PrepareFrameState(call, BailoutId::None());
+ break;
+ }
+ case ObjectLiteral::Property::SETTER: {
+ Node* attr = jsgraph()->Constant(NONE);
+ const Operator* op = javascript()->CallRuntime(
+ Runtime::kDefineSetterPropertyUnchecked, 4);
+ Node* call = NewNode(op, receiver, key, value, attr);
+ PrepareFrameState(call, BailoutId::None());
+ break;
+ }
+ }
+ }
+
// Transform literals that contain functions to fast properties.
+ literal = environment()->Top(); // Reload from operand stack.
if (expr->has_function()) {
const Operator* op =
javascript()->CallRuntime(Runtime::kToFastProperties, 1);
@@ -1000,83 +1908,163 @@
}
+void AstGraphBuilder::VisitObjectLiteralAccessor(
+ Node* home_object, ObjectLiteralProperty* property) {
+ if (property == nullptr) {
+ VisitForValueOrNull(nullptr);
+ } else {
+ VisitForValue(property->value());
+ BuildSetHomeObject(environment()->Top(), home_object, property);
+ }
+}
+
+
void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Node* closure = GetFunctionClosure();
// Create node to deep-copy the literal boilerplate.
- expr->BuildConstantElements(isolate());
- Node* literals_array =
- BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* literal_index = jsgraph()->Constant(expr->literal_index());
- Node* constants = jsgraph()->Constant(expr->constant_elements());
- Node* flags = jsgraph()->Constant(expr->ComputeFlags());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kCreateArrayLiteral, 4);
- Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+ const Operator* op = javascript()->CreateLiteralArray(
+ expr->constant_elements(), expr->ComputeFlags(true),
+ expr->literal_index());
+ Node* literal = NewNode(op, closure);
+ PrepareFrameState(literal, expr->CreateLiteralId(),
+ OutputFrameStateCombine::Push());
- // The array and the literal index are both expected on the operand stack
- // during computation of the element values.
+ // The array is expected on the operand stack during computation of the
+ // element values.
environment()->Push(literal);
- environment()->Push(literal_index);
// Create nodes to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
- for (int i = 0; i < expr->values()->length(); i++) {
- Expression* subexpr = expr->values()->at(i);
+ int array_index = 0;
+ for (; array_index < expr->values()->length(); array_index++) {
+ Expression* subexpr = expr->values()->at(array_index);
+ if (subexpr->IsSpread()) break;
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
VisitForValue(subexpr);
- Node* value = environment()->Pop();
- Node* index = jsgraph()->Constant(i);
- Node* store = NewNode(javascript()->StoreProperty(strict_mode()), literal,
- index, value);
- PrepareFrameState(store, expr->GetIdForElement(i));
+ {
+ FrameStateBeforeAndAfter states(this, subexpr->id());
+ VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
+ Node* value = environment()->Pop();
+ Node* index = jsgraph()->Constant(array_index);
+ Node* literal = environment()->Top();
+ Node* store = BuildKeyedStore(literal, index, value, pair);
+ states.AddToNode(store, expr->GetIdForElement(array_index),
+ OutputFrameStateCombine::Ignore());
+ }
}
- environment()->Pop(); // Array literal index.
+ // In case the array literal contains spread expressions it has two parts. The
+ // first part is the "static" array which has a literal index and is handled
+ // above. The second part is the part after the first spread expression
+ // (inclusive) and these elements get appended to the array. Note that the
+ // number of elements an iterable produces is unknown ahead of time.
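+  // For example (illustrative only), in [1, 2, ...xs, 3] the elements 1 and 2
+  // are stored by the loop above, whereas ...xs and 3 are appended one by one
+  // below.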
+ for (; array_index < expr->values()->length(); array_index++) {
+ Expression* subexpr = expr->values()->at(array_index);
+ Node* result;
+
+ if (subexpr->IsSpread()) {
+ VisitForValue(subexpr->AsSpread()->expression());
+ FrameStateBeforeAndAfter states(this,
+ subexpr->AsSpread()->expression()->id());
+ Node* iterable = environment()->Pop();
+ Node* array = environment()->Pop();
+ Node* function = BuildLoadNativeContextField(
+ Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX);
+ result = NewNode(javascript()->CallFunction(3, language_mode()), function,
+ array, iterable);
+ states.AddToNode(result, expr->GetIdForElement(array_index));
+ } else {
+ VisitForValue(subexpr);
+ Node* value = environment()->Pop();
+ Node* array = environment()->Pop();
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kAppendElement, 2);
+ result = NewNode(op, array, value);
+ PrepareFrameState(result, expr->GetIdForElement(array_index));
+ }
+
+ environment()->Push(result);
+ }
+
ast_context()->ProduceValue(environment()->Pop());
}
-void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) {
- DCHECK(expr->IsValidReferenceExpression());
+void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
+ const VectorSlotPair& feedback,
+ BailoutId bailout_id_before,
+ BailoutId bailout_id_after) {
+ DCHECK(expr->IsValidReferenceExpressionOrThis());
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->AsProperty();
- LhsKind assign_type = DetermineLhsKind(expr);
+ LhsKind assign_type = Property::GetAssignType(property);
// Evaluate LHS expression and store the value.
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
- // TODO(jarin) Fill in the correct bailout id.
- BuildVariableAssignment(var, value, Token::ASSIGN, BailoutId::None());
+ environment()->Push(value);
+ FrameStateBeforeAndAfter states(this, bailout_id_before);
+ value = environment()->Pop();
+ BuildVariableAssignment(var, value, Token::ASSIGN, feedback,
+ bailout_id_after, states);
break;
}
case NAMED_PROPERTY: {
environment()->Push(value);
VisitForValue(property->obj());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
Node* object = environment()->Pop();
value = environment()->Pop();
- Unique<Name> name =
- MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- Node* store =
- NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
- // TODO(jarin) Fill in the correct bailout id.
- PrepareFrameState(store, BailoutId::None());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* store = BuildNamedStore(object, name, value, feedback);
+ states.AddToNode(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
case KEYED_PROPERTY: {
environment()->Push(value);
VisitForValue(property->obj());
VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = environment()->Pop();
- Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
- key, value);
- // TODO(jarin) Fill in the correct bailout id.
- PrepareFrameState(store, BailoutId::None());
+ Node* store = BuildKeyedStore(object, key, value, feedback);
+ states.AddToNode(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ environment()->Push(value);
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ value = environment()->Pop();
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
+ states.AddToNode(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ environment()->Push(value);
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
+ VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ Node* key = environment()->Pop();
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ value = environment()->Pop();
+ Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
+ states.AddToNode(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
}
@@ -1084,48 +2072,67 @@
void AstGraphBuilder::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->target()->AsProperty();
- LhsKind assign_type = DetermineLhsKind(expr->target());
+ LhsKind assign_type = Property::GetAssignType(property);
+ bool needs_frame_state_before = true;
// Evaluate LHS expression.
switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
+ case VARIABLE: {
+ Variable* variable = expr->target()->AsVariableProxy()->var();
+ if (variable->location() == VariableLocation::PARAMETER ||
+ variable->location() == VariableLocation::LOCAL ||
+ variable->location() == VariableLocation::CONTEXT) {
+ needs_frame_state_before = false;
+ }
break;
+ }
case NAMED_PROPERTY:
VisitForValue(property->obj());
break;
- case KEYED_PROPERTY: {
+ case KEYED_PROPERTY:
VisitForValue(property->obj());
VisitForValue(property->key());
break;
- }
+ case NAMED_SUPER_PROPERTY:
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
+ break;
+ case KEYED_SUPER_PROPERTY:
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
+ VisitForValue(property->key());
+ break;
}
+ BailoutId before_store_id = BailoutId::None();
// Evaluate the value and potentially handle compound assignments by loading
// the left-hand side value and performing a binary operation.
if (expr->is_compound()) {
- Node* old_value = NULL;
+ Node* old_value = nullptr;
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->target()->AsVariableProxy();
VectorSlotPair pair =
CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- old_value = BuildVariableLoad(proxy->var(), expr->target()->id(), pair);
+ FrameStateBeforeAndAfter states(this, BeforeId(proxy));
+ old_value =
+ BuildVariableLoad(proxy->var(), expr->target()->id(), states, pair,
+ OutputFrameStateCombine::Push());
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Top();
- Unique<Name> name =
- MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = NewNode(javascript()->LoadNamed(name, pair), object);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ old_value = BuildNamedLoad(object, name, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
break;
}
case KEYED_PROPERTY: {
@@ -1133,50 +2140,104 @@
Node* object = environment()->Peek(1);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = NewNode(javascript()->LoadProperty(pair), object, key);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ old_value = BuildKeyedLoad(object, key, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ Node* home_object = environment()->Top();
+ Node* receiver = environment()->Peek(1);
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ VectorSlotPair pair =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ Node* key = environment()->Top();
+ Node* home_object = environment()->Peek(1);
+ Node* receiver = environment()->Peek(2);
+ VectorSlotPair pair =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
break;
}
}
environment()->Push(old_value);
VisitForValue(expr->value());
- Node* right = environment()->Pop();
- Node* left = environment()->Pop();
- Node* value = BuildBinaryOp(left, right, expr->binary_op());
- PrepareFrameState(value, expr->binary_operation()->id(),
- OutputFrameStateCombine::Push());
+ Node* value;
+ {
+ FrameStateBeforeAndAfter states(this, expr->value()->id());
+ Node* right = environment()->Pop();
+ Node* left = environment()->Pop();
+ value =
+ BuildBinaryOp(left, right, expr->binary_op(),
+ expr->binary_operation()->BinaryOperationFeedbackId());
+ states.AddToNode(value, expr->binary_operation()->id(),
+ OutputFrameStateCombine::Push());
+ }
environment()->Push(value);
+ if (needs_frame_state_before) {
+ before_store_id = expr->binary_operation()->id();
+ }
} else {
VisitForValue(expr->value());
+ if (needs_frame_state_before) {
+ before_store_id = expr->value()->id();
+ }
}
+ FrameStateBeforeAndAfter store_states(this, before_store_id);
// Store the value.
Node* value = environment()->Pop();
+ VectorSlotPair feedback = CreateVectorSlotPair(expr->AssignmentSlot());
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->target()->AsVariableProxy()->var();
- BuildVariableAssignment(variable, value, expr->op(), expr->AssignmentId(),
- ast_context()->GetStateCombine());
+ BuildVariableAssignment(variable, value, expr->op(), feedback, expr->id(),
+ store_states, ast_context()->GetStateCombine());
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
- Unique<Name> name =
- MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- Node* store =
- NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
- PrepareFrameState(store, expr->AssignmentId(),
- ast_context()->GetStateCombine());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* store = BuildNamedStore(object, name, value, feedback);
+ store_states.AddToNode(store, expr->id(),
+ ast_context()->GetStateCombine());
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
- key, value);
- PrepareFrameState(store, expr->AssignmentId(),
- ast_context()->GetStateCombine());
+ Node* store = BuildKeyedStore(object, key, value, feedback);
+ store_states.AddToNode(store, expr->id(),
+ ast_context()->GetStateCombine());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
+ store_states.AddToNode(store, expr->id(),
+ ast_context()->GetStateCombine());
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ Node* key = environment()->Pop();
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
+ store_states.AddToNode(store, expr->id(),
+ ast_context()->GetStateCombine());
break;
}
}
@@ -1186,11 +2247,8 @@
void AstGraphBuilder::VisitYield(Yield* expr) {
- VisitForValue(expr->generator_object());
- VisitForValue(expr->expression());
- environment()->Pop();
- environment()->Pop();
- // TODO(turbofan): VisitYield
+ // TODO(turbofan): Implement yield here.
+ SetStackOverflow();
ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
}
@@ -1198,29 +2256,62 @@
void AstGraphBuilder::VisitThrow(Throw* expr) {
VisitForValue(expr->exception());
Node* exception = environment()->Pop();
- const Operator* op = javascript()->CallRuntime(Runtime::kThrow, 1);
- Node* value = NewNode(op, exception);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ Node* value = BuildThrowError(exception, expr->id());
ast_context()->ProduceValue(value);
}
void AstGraphBuilder::VisitProperty(Property* expr) {
- Node* value;
+ Node* value = nullptr;
+ LhsKind property_kind = Property::GetAssignType(expr);
VectorSlotPair pair = CreateVectorSlotPair(expr->PropertyFeedbackSlot());
- if (expr->key()->IsPropertyName()) {
- VisitForValue(expr->obj());
- Node* object = environment()->Pop();
- Unique<Name> name = MakeUnique(expr->key()->AsLiteral()->AsPropertyName());
- value = NewNode(javascript()->LoadNamed(name, pair), object);
- } else {
- VisitForValue(expr->obj());
- VisitForValue(expr->key());
- Node* key = environment()->Pop();
- Node* object = environment()->Pop();
- value = NewNode(javascript()->LoadProperty(pair), object, key);
+ switch (property_kind) {
+ case VARIABLE:
+ UNREACHABLE();
+ break;
+ case NAMED_PROPERTY: {
+ VisitForValue(expr->obj());
+ FrameStateBeforeAndAfter states(this, expr->obj()->id());
+ Node* object = environment()->Pop();
+ Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
+ value = BuildNamedLoad(object, name, pair);
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ VisitForValue(expr->obj());
+ VisitForValue(expr->key());
+ FrameStateBeforeAndAfter states(this, expr->key()->id());
+ Node* key = environment()->Pop();
+ Node* object = environment()->Pop();
+ value = BuildKeyedLoad(object, key, pair);
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
+ FrameStateBeforeAndAfter states(this, expr->obj()->id());
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
+ value = BuildNamedSuperLoad(receiver, home_object, name, pair);
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
+ VisitForValue(expr->key());
+ FrameStateBeforeAndAfter states(this, expr->key()->id());
+ Node* key = environment()->Pop();
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ break;
+ }
}
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
@@ -1231,68 +2322,136 @@
// Prepare the callee and the receiver for the function call. This depends on
// the semantics of the underlying call type.
- CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
- Node* receiver_value = NULL;
- Node* callee_value = NULL;
+ ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
+ Node* receiver_value = nullptr;
+ Node* callee_value = nullptr;
bool possibly_eval = false;
switch (call_type) {
case Call::GLOBAL_CALL: {
VariableProxy* proxy = callee->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+ FrameStateBeforeAndAfter states(this, BeforeId(proxy));
callee_value =
- BuildVariableLoad(proxy->var(), expr->expression()->id(), pair);
+ BuildVariableLoad(proxy->var(), expr->expression()->id(), states,
+ pair, OutputFrameStateCombine::Push());
+ receiver_hint = ConvertReceiverMode::kNullOrUndefined;
receiver_value = jsgraph()->UndefinedConstant();
break;
}
case Call::LOOKUP_SLOT_CALL: {
Variable* variable = callee->AsVariableProxy()->var();
- DCHECK(variable->location() == Variable::LOOKUP);
+ DCHECK(variable->location() == VariableLocation::LOOKUP);
Node* name = jsgraph()->Constant(variable->name());
const Operator* op =
javascript()->CallRuntime(Runtime::kLoadLookupSlot, 2);
Node* pair = NewNode(op, current_context(), name);
callee_value = NewNode(common()->Projection(0), pair);
receiver_value = NewNode(common()->Projection(1), pair);
-
- PrepareFrameState(pair, expr->EvalOrLookupId(),
+ PrepareFrameState(pair, expr->LookupId(),
OutputFrameStateCombine::Push(2));
break;
}
- case Call::PROPERTY_CALL: {
+ case Call::NAMED_PROPERTY_CALL: {
Property* property = callee->AsProperty();
- VisitForValue(property->obj());
- Node* object = environment()->Top();
- VectorSlotPair pair =
+ VectorSlotPair feedback =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- if (property->key()->IsPropertyName()) {
- Unique<Name> name =
- MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- callee_value = NewNode(javascript()->LoadNamed(name, pair), object);
- } else {
- VisitForValue(property->key());
- Node* key = environment()->Pop();
- callee_value = NewNode(javascript()->LoadProperty(pair), object, key);
- }
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ VisitForValue(property->obj());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* object = environment()->Top();
+ callee_value = BuildNamedLoad(object, name, feedback);
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. However, the receiver is guaranteed
+ // not to be null or undefined at this point.
+ receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
receiver_value = environment()->Pop();
- // Note that a PROPERTY_CALL requires the receiver to be wrapped into an
- // object for sloppy callees. This could also be modeled explicitly here,
- // thereby obsoleting the need for a flag to the call operator.
- flags = CALL_AS_METHOD;
break;
}
- case Call::SUPER_CALL: {
- // todo(dslomov): implement super calls in turbofan.
- UNIMPLEMENTED();
+ case Call::KEYED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ VisitForValue(property->obj());
+ VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ Node* key = environment()->Pop();
+ Node* object = environment()->Top();
+ callee_value = BuildKeyedLoad(object, key, feedback);
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. However, the receiver is guaranteed
+ // not to be null or undefined at this point.
+ receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+ receiver_value = environment()->Pop();
break;
}
+ case Call::NAMED_SUPER_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ SuperPropertyReference* super_ref =
+ property->obj()->AsSuperPropertyReference();
+ VisitForValue(super_ref->home_object());
+ VisitForValue(super_ref->this_var());
+ Node* home = environment()->Peek(1);
+ Node* object = environment()->Top();
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ callee_value = BuildNamedSuperLoad(object, home, name, VectorSlotPair());
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. Since the receiver is not the target of
+ // the load, it could very well be null or undefined at this point.
+ receiver_value = environment()->Pop();
+ environment()->Drop(1);
+ break;
+ }
+ case Call::KEYED_SUPER_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ SuperPropertyReference* super_ref =
+ property->obj()->AsSuperPropertyReference();
+ VisitForValue(super_ref->home_object());
+ VisitForValue(super_ref->this_var());
+ environment()->Push(environment()->Top()); // Duplicate this_var.
+ environment()->Push(environment()->Peek(2)); // Duplicate home_obj.
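+ // Stack layout is now: home_object, this_var, this_var, home_object
+ // (bottom to top); the key is evaluated on top of that.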
+ VisitForValue(property->key());
+ Node* key = environment()->Pop();
+ Node* home = environment()->Pop();
+ Node* object = environment()->Pop();
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. Since the receiver is not the target of
+ // the load, it could very well be null or undefined at this point.
+ receiver_value = environment()->Pop();
+ environment()->Drop(1);
+ break;
+ }
+ case Call::SUPER_CALL:
+ return VisitCallSuper(expr);
case Call::POSSIBLY_EVAL_CALL:
possibly_eval = true;
+ if (callee->AsVariableProxy()->var()->IsLookupSlot()) {
+ Variable* variable = callee->AsVariableProxy()->var();
+ Node* name = jsgraph()->Constant(variable->name());
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kLoadLookupSlot, 2);
+ Node* pair = NewNode(op, current_context(), name);
+ callee_value = NewNode(common()->Projection(0), pair);
+ receiver_value = NewNode(common()->Projection(1), pair);
+ PrepareFrameState(pair, expr->LookupId(),
+ OutputFrameStateCombine::Push(2));
+ break;
+ }
// Fall through.
case Call::OTHER_CALL:
VisitForValue(callee);
callee_value = environment()->Pop();
+ receiver_hint = ConvertReceiverMode::kNullOrUndefined;
receiver_value = jsgraph()->UndefinedConstant();
break;
}
@@ -1306,8 +2465,8 @@
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
- // Resolve callee and receiver for a potential direct eval call. This block
- // will mutate the callee and receiver values pushed onto the environment.
+ // Resolve callee for a potential direct eval call. This block will mutate the
+ // callee value pushed onto the environment.
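+ // A call such as eval(src) is only a direct eval if the callee actually
+ // resolves to the real eval builtin, which can only be decided at runtime.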
if (possibly_eval && args->length() > 0) {
int arg_count = args->length();
@@ -1316,29 +2475,59 @@
Node* source = environment()->Peek(arg_count - 1);
// Create node to ask for help resolving potential eval call. This will
- // provide a fully resolved callee and the corresponding receiver.
+ // provide a fully resolved callee to patch into the environment.
Node* function = GetFunctionClosure();
- Node* receiver = environment()->Lookup(info()->scope()->receiver());
- Node* strict = jsgraph()->Constant(strict_mode());
- Node* position = jsgraph()->Constant(info()->scope()->start_position());
+ Node* language = jsgraph()->Constant(language_mode());
+ Node* position = jsgraph()->Constant(current_scope()->start_position());
const Operator* op =
- javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
- Node* pair =
- NewNode(op, callee, source, function, receiver, strict, position);
- PrepareFrameState(pair, expr->EvalOrLookupId(),
+ javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ Node* new_callee =
+ NewNode(op, callee, source, function, language, position);
+ PrepareFrameState(new_callee, expr->EvalId(),
OutputFrameStateCombine::PokeAt(arg_count + 1));
- Node* new_callee = NewNode(common()->Projection(0), pair);
- Node* new_receiver = NewNode(common()->Projection(1), pair);
- // Patch callee and receiver on the environment.
+ // Patch callee on the environment.
environment()->Poke(arg_count + 1, new_callee);
- environment()->Poke(arg_count + 0, new_receiver);
}
// Create node to perform the function call.
- const Operator* call = javascript()->CallFunction(args->length() + 2, flags);
+ VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
+ const Operator* call = javascript()->CallFunction(
+ args->length() + 2, language_mode(), feedback, receiver_hint);
+ FrameStateBeforeAndAfter states(this, expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ environment()->Push(value->InputAt(0)); // The callee passed to the call.
+ states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
+ environment()->Drop(1);
+ ast_context()->ProduceValue(value);
+}
+
+
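+// Lowers a super constructor call, e.g. super(a, b) in a derived class
+// constructor: the callee is obtained via the GetSuperConstructor intrinsic
+// and the current new.target is forwarded to the construct call.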
+void AstGraphBuilder::VisitCallSuper(Call* expr) {
+ SuperCallReference* super = expr->expression()->AsSuperCallReference();
+ DCHECK_NOT_NULL(super);
+
+ // Prepare the callee for the super call.
+ VisitForValue(super->this_function_var());
+ Node* this_function = environment()->Pop();
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kInlineGetSuperConstructor, 1);
+ Node* super_function = NewNode(op, this_function);
+ environment()->Push(super_function);
+
+ // Evaluate all arguments to the super call.
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForValues(args);
+
+ // The new target is loaded from the {new.target} variable.
+ VisitForValue(super->new_target_var());
+
+ // Create node to perform the super call.
+ const Operator* call =
+ javascript()->CallConstruct(args->length() + 2, VectorSlotPair());
+ FrameStateBeforeAndAfter states(this, super->new_target_var()->id());
+ Node* value = ProcessArguments(call, args->length() + 2);
+ states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(value);
}
@@ -1350,29 +2539,30 @@
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
+ // The baseline compiler doesn't push the new.target, so we need to record
+ // the frame state before the push.
+ FrameStateBeforeAndAfter states(
+ this, args->is_empty() ? expr->expression()->id() : args->last()->id());
+
+ // The new target is the same as the callee.
+ environment()->Push(environment()->Peek(args->length()));
+
// Create node to perform the construct call.
- const Operator* call = javascript()->CallConstruct(args->length() + 1);
- Node* value = ProcessArguments(call, args->length() + 1);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ VectorSlotPair feedback = CreateVectorSlotPair(expr->CallNewFeedbackSlot());
+ const Operator* call =
+ javascript()->CallConstruct(args->length() + 2, feedback);
+ Node* value = ProcessArguments(call, args->length() + 2);
+ states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(value);
}
void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
-
// The callee and the receiver both have to be pushed onto the operand stack
// before the arguments are evaluated.
- CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
- Node* receiver_value = BuildLoadBuiltinsObject();
- Unique<String> unique = MakeUnique(name);
- VectorSlotPair pair = CreateVectorSlotPair(expr->CallRuntimeFeedbackSlot());
- Node* callee_value =
- NewNode(javascript()->LoadNamed(unique, pair), receiver_value);
- // TODO(jarin): Find/create a bailout id to deoptimize to (crankshaft
- // refuses to optimize functions with jsruntime calls).
- PrepareFrameState(callee_value, BailoutId::None(),
- OutputFrameStateCombine::Push());
+ Node* callee_value = BuildLoadNativeContextField(expr->context_index());
+ Node* receiver_value = jsgraph()->UndefinedConstant();
+
environment()->Push(callee_value);
environment()->Push(receiver_value);
@@ -1381,23 +2571,31 @@
VisitForValues(args);
// Create node to perform the JS runtime call.
- const Operator* call = javascript()->CallFunction(args->length() + 2, flags);
+ const Operator* call =
+ javascript()->CallFunction(args->length() + 2, language_mode());
+ FrameStateBeforeAndAfter states(this, expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
- const Runtime::Function* function = expr->function();
-
// Handle calls to runtime functions implemented in JavaScript separately as
// the call follows the JavaScript ABI and the callee is statically unknown.
if (expr->is_jsruntime()) {
- DCHECK(function == NULL && expr->name()->length() > 0);
return VisitCallJSRuntime(expr);
}
+ const Runtime::Function* function = expr->function();
+
+ // TODO(mstarzinger): This bailout is a gigantic hack; the owner is ashamed.
+ if (function->function_id == Runtime::kInlineGeneratorNext ||
+ function->function_id == Runtime::kInlineGeneratorThrow) {
+ ast_context()->ProduceValue(jsgraph()->TheHoleConstant());
+ return SetStackOverflow();
+ }
+
// Evaluate all arguments to the runtime call.
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
@@ -1405,8 +2603,9 @@
// Create node to perform the runtime call.
Runtime::FunctionId functionId = function->function_id;
const Operator* call = javascript()->CallRuntime(functionId, args->length());
+ FrameStateBeforeAndAfter states(this, expr->CallId());
Node* value = ProcessArguments(call, args->length());
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
@@ -1428,97 +2627,177 @@
void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
- DCHECK(expr->expression()->IsValidReferenceExpression());
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->expression()->AsProperty();
- LhsKind assign_type = DetermineLhsKind(expr->expression());
+ LhsKind assign_type = Property::GetAssignType(property);
// Reserve space for result of postfix operation.
bool is_postfix = expr->is_postfix() && !ast_context()->IsEffect();
- if (is_postfix) environment()->Push(jsgraph()->UndefinedConstant());
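+ // The zero constant pushed here is only a placeholder; it is replaced
+ // below (via Poke) with the old value once that has been computed.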
+ if (is_postfix && assign_type != VARIABLE) {
+ environment()->Push(jsgraph()->ZeroConstant());
+ }
// Evaluate LHS expression and get old value.
- Node* old_value = NULL;
+ Node* old_value = nullptr;
int stack_depth = -1;
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+ FrameStateBeforeAndAfter states(this, BeforeId(proxy));
old_value =
- BuildVariableLoad(proxy->var(), expr->expression()->id(), pair);
+ BuildVariableLoad(proxy->var(), expr->expression()->id(), states,
+ pair, OutputFrameStateCombine::Push());
stack_depth = 0;
break;
}
case NAMED_PROPERTY: {
VisitForValue(property->obj());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
Node* object = environment()->Top();
- Unique<Name> name =
- MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = NewNode(javascript()->LoadNamed(name, pair), object);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ old_value = BuildNamedLoad(object, name, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
stack_depth = 1;
break;
}
case KEYED_PROPERTY: {
VisitForValue(property->obj());
VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
Node* key = environment()->Top();
Node* object = environment()->Peek(1);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = NewNode(javascript()->LoadProperty(pair), object, key);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ old_value = BuildKeyedLoad(object, key, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
stack_depth = 2;
break;
}
+ case NAMED_SUPER_PROPERTY: {
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ Node* home_object = environment()->Top();
+ Node* receiver = environment()->Peek(1);
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ VectorSlotPair pair =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ stack_depth = 2;
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
+ VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ Node* key = environment()->Top();
+ Node* home_object = environment()->Peek(1);
+ Node* receiver = environment()->Peek(2);
+ VectorSlotPair pair =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
+ states.AddToNode(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ stack_depth = 3;
+ break;
+ }
}
// Convert old value into a number.
- old_value = NewNode(javascript()->ToNumber(), old_value);
+ if (!is_strong(language_mode())) {
+ old_value = NewNode(javascript()->ToNumber(), old_value);
+ PrepareFrameState(old_value, expr->ToNumberId(),
+ OutputFrameStateCombine::Push());
+ }
+
+ // Create a proper eager frame state for the stores.
+ environment()->Push(old_value);
+ FrameStateBeforeAndAfter store_states(this, expr->ToNumberId());
+ old_value = environment()->Pop();
// Save result for postfix expressions at correct stack depth.
- if (is_postfix) environment()->Poke(stack_depth, old_value);
+ if (is_postfix) {
+ if (assign_type != VARIABLE) {
+ environment()->Poke(stack_depth, old_value);
+ } else {
+ environment()->Push(old_value);
+ }
+ }
// Create node to perform +1/-1 operation.
- Node* value =
- BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
- // TODO(jarin) Insert proper bailout id here (will need to change
- // full code generator).
- PrepareFrameState(value, BailoutId::None());
+ Node* value;
+ {
+ // TODO(bmeurer): Cleanup this feedback/bailout mess!
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
+ expr->binary_op(), TypeFeedbackId::None());
+ // This should never deoptimize outside strong mode, because otherwise we
+ // would already have converted the operand to a number above.
+ states.AddToNode(value, is_strong(language_mode()) ? expr->ToNumberId()
+ : BailoutId::None(),
+ OutputFrameStateCombine::Ignore());
+ }
// Store the value.
+ VectorSlotPair feedback = CreateVectorSlotPair(expr->CountSlot());
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->expression()->AsVariableProxy()->var();
environment()->Push(value);
- BuildVariableAssignment(variable, value, expr->op(),
- expr->AssignmentId());
+ BuildVariableAssignment(variable, value, expr->op(), feedback,
+ expr->AssignmentId(), store_states);
environment()->Pop();
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
- Unique<Name> name =
- MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- Node* store =
- NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* store = BuildNamedStore(object, name, value, feedback);
environment()->Push(value);
- PrepareFrameState(store, expr->AssignmentId());
+ store_states.AddToNode(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore());
environment()->Pop();
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
- key, value);
+ Node* store = BuildKeyedStore(object, key, value, feedback);
environment()->Push(value);
- PrepareFrameState(store, expr->AssignmentId());
+ store_states.AddToNode(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore());
+ environment()->Pop();
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
+ environment()->Push(value);
+ store_states.AddToNode(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore());
+ environment()->Pop();
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ Node* key = environment()->Pop();
+ Node* home_object = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
+ environment()->Push(value);
+ store_states.AddToNode(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore());
environment()->Pop();
break;
}
@@ -1541,10 +2820,12 @@
default: {
VisitForValue(expr->left());
VisitForValue(expr->right());
+ FrameStateBeforeAndAfter states(this, expr->right()->id());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
- Node* value = BuildBinaryOp(left, right, expr->op());
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ Node* value = BuildBinaryOp(left, right, expr->op(),
+ expr->BinaryOperationFeedbackId());
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
}
@@ -1567,16 +2848,16 @@
op = javascript()->StrictNotEqual();
break;
case Token::LT:
- op = javascript()->LessThan();
+ op = javascript()->LessThan(language_mode());
break;
case Token::GT:
- op = javascript()->GreaterThan();
+ op = javascript()->GreaterThan(language_mode());
break;
case Token::LTE:
- op = javascript()->LessThanOrEqual();
+ op = javascript()->LessThanOrEqual(language_mode());
break;
case Token::GTE:
- op = javascript()->GreaterThanOrEqual();
+ op = javascript()->GreaterThanOrEqual(language_mode());
break;
case Token::INSTANCEOF:
op = javascript()->InstanceOf();
@@ -1585,31 +2866,55 @@
op = javascript()->HasProperty();
break;
default:
- op = NULL;
+ op = nullptr;
UNREACHABLE();
}
VisitForValue(expr->left());
VisitForValue(expr->right());
+ FrameStateBeforeAndAfter states(this, expr->right()->id());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = NewNode(op, left, right);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
+void AstGraphBuilder::VisitSpread(Spread* expr) {
+ // Handled entirely by the parser itself.
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitEmptyParentheses(EmptyParentheses* expr) {
+ // Handled entirely by the parser itself.
+ UNREACHABLE();
+}
+
+
void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
Node* value = GetFunctionClosure();
ast_context()->ProduceValue(value);
}
-void AstGraphBuilder::VisitSuperReference(SuperReference* expr) {
+void AstGraphBuilder::VisitSuperPropertyReference(
+ SuperPropertyReference* expr) {
+ Node* value = BuildThrowUnsupportedSuperError(expr->id());
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitSuperCallReference(SuperCallReference* expr) {
+ // Handled by VisitCall.
UNREACHABLE();
}
-void AstGraphBuilder::VisitCaseClause(CaseClause* expr) { UNREACHABLE(); }
+void AstGraphBuilder::VisitCaseClause(CaseClause* expr) {
+ // Handled entirely in VisitSwitch.
+ UNREACHABLE();
+}
void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
@@ -1622,24 +2927,36 @@
for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
DeclareGlobalsNativeFlag::encode(info()->is_native()) |
- DeclareGlobalsStrictMode::encode(strict_mode());
+ DeclareGlobalsLanguageMode::encode(language_mode());
Node* flags = jsgraph()->Constant(encoded_flags);
Node* pairs = jsgraph()->Constant(data);
- const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals, 3);
- NewNode(op, current_context(), pairs, flags);
+ const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals, 2);
+ Node* call = NewNode(op, pairs, flags);
+ PrepareFrameState(call, BailoutId::Declarations());
globals()->clear();
}
void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
- if (stmt == NULL) return;
+ if (stmt == nullptr) return;
+ Visit(stmt);
+}
+
+
+void AstGraphBuilder::VisitInScope(Statement* stmt, Scope* s, Node* context) {
+ ContextScope scope(this, s, context);
+ DCHECK(s->declarations()->is_empty());
Visit(stmt);
}
void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
- LoopBuilder* loop, int drop_extra) {
- BreakableScope scope(this, stmt, loop, drop_extra);
+ LoopBuilder* loop) {
+ ControlScopeForIteration scope(this, stmt, loop);
+ if (FLAG_turbo_loop_stackcheck || !info()->shared_info()->asm_function()) {
+ Node* node = NewNode(javascript()->StackCheck());
+ PrepareFrameState(node, stmt->StackCheckId());
+ }
Visit(stmt->body());
}
@@ -1650,7 +2967,9 @@
- // Delete of an unqualified identifier is only allowed in classic mode but
- // deleting "this" is allowed in all language modes.
Variable* variable = expr->expression()->AsVariableProxy()->var();
- DCHECK(strict_mode() == SLOPPY || variable->is_this());
+ // Delete of an unqualified identifier is disallowed in strict mode but
+ // "delete this" is allowed.
+ DCHECK(is_sloppy(language_mode()) || variable->HasThisName(isolate()));
value = BuildVariableDelete(variable, expr->id(),
ast_context()->GetStateCombine());
} else if (expr->expression()->IsProperty()) {
@@ -1659,7 +2978,7 @@
VisitForValue(property->key());
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- value = NewNode(javascript()->DeleteProperty(strict_mode()), object, key);
+ value = NewNode(javascript()->DeleteProperty(language_mode()), object, key);
PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
} else {
VisitForEffect(expr->expression());
@@ -1683,8 +3002,10 @@
// perform a non-contextual load in case the operand is a variable proxy.
VariableProxy* proxy = expr->expression()->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- operand = BuildVariableLoad(proxy->var(), expr->expression()->id(), pair,
- NOT_CONTEXTUAL);
+ FrameStateBeforeAndAfter states(this, BeforeId(proxy));
+ operand =
+ BuildVariableLoad(proxy->var(), expr->expression()->id(), states, pair,
+ OutputFrameStateCombine::Push(), INSIDE_TYPEOF);
} else {
VisitForValue(expr->expression());
operand = environment()->Pop();
@@ -1697,8 +3018,9 @@
void AstGraphBuilder::VisitNot(UnaryOperation* expr) {
VisitForValue(expr->expression());
Node* operand = environment()->Pop();
- // TODO(mstarzinger): Possible optimization when we are in effect context.
- Node* value = NewNode(javascript()->UnaryNot(), operand);
+ Node* input = BuildToBoolean(operand, expr->expression()->test_id());
+ Node* value = NewNode(common()->Select(MachineRepresentation::kTagged), input,
+ jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
ast_context()->ProduceValue(value);
}
@@ -1715,13 +3037,15 @@
IfBuilder compare_if(this);
VisitForValue(expr->left());
Node* condition = environment()->Top();
- compare_if.If(BuildToBoolean(condition));
+ compare_if.If(BuildToBoolean(condition, expr->left()->test_id()));
compare_if.Then();
if (is_logical_and) {
environment()->Pop();
Visit(expr->right());
} else if (ast_context()->IsEffect()) {
environment()->Pop();
+ } else if (ast_context()->IsTest()) {
+ environment()->Poke(0, jsgraph()->TrueConstant());
}
compare_if.Else();
if (!is_logical_and) {
@@ -1729,23 +3053,76 @@
Visit(expr->right());
} else if (ast_context()->IsEffect()) {
environment()->Pop();
+ } else if (ast_context()->IsTest()) {
+ environment()->Poke(0, jsgraph()->FalseConstant());
}
compare_if.End();
ast_context()->ReplaceValue();
}
-StrictMode AstGraphBuilder::strict_mode() const {
- return info()->strict_mode();
+LanguageMode AstGraphBuilder::language_mode() const {
+ return info()->language_mode();
}
VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
- FeedbackVectorICSlot slot) const {
+ FeedbackVectorSlot slot) const {
return VectorSlotPair(handle(info()->shared_info()->feedback_vector()), slot);
}
+void AstGraphBuilder::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ Visit(node->expression());
+}
+
+
+namespace {
+
+// Limit of context chain length to which inline check is possible.
+const int kMaxCheckDepth = 30;
+
+// Sentinel for {TryLoadDynamicVariable} disabling inline checks.
+const uint32_t kFullCheckRequired = -1;
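+// (The -1 wraps to 0xFFFFFFFF; since depths are capped at kMaxCheckDepth,
+// bit 31 is never set in a valid bitset and the sentinel cannot collide.)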
+
+} // namespace
+
+
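+// Computes a bitset of context-chain depths at which extensions must be
+// checked before a dynamic global load can take the inline fast path. For
+// example, a result of 0b101 requests checks at depths 0 and 2.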
+uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
+ DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
+ bool found_eval_scope = false;
+ uint32_t check_depths = 0;
+ for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
+ if (s->num_heap_slots() <= 0) continue;
+ // TODO(mstarzinger): If we have reached an eval scope, we check all
+ // extensions from this point. Replicated from full-codegen; figure out
+ // whether this is still needed. If not, drop {found_eval_scope} below.
+ if (s->is_eval_scope()) found_eval_scope = true;
+ if (!s->calls_sloppy_eval() && !found_eval_scope) continue;
+ int depth = current_scope()->ContextChainLength(s);
+ if (depth > kMaxCheckDepth) return kFullCheckRequired;
+ check_depths |= 1 << depth;
+ }
+ return check_depths;
+}
+
+
+uint32_t AstGraphBuilder::ComputeBitsetForDynamicContext(Variable* variable) {
+ DCHECK_EQ(DYNAMIC_LOCAL, variable->mode());
+ uint32_t check_depths = 0;
+ for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
+ if (s->num_heap_slots() <= 0) continue;
+ if (!s->calls_sloppy_eval() && s != variable->scope()) continue;
+ int depth = current_scope()->ContextChainLength(s);
+ if (depth > kMaxCheckDepth) return kFullCheckRequired;
+ check_depths |= 1 << depth;
+ if (s == variable->scope()) break;
+ }
+ return check_depths;
+}
+
+
Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
DCHECK(environment()->stack_height() >= arity);
Node** all = info()->zone()->NewArray<Node*>(arity);
@@ -1757,52 +3134,31 @@
}
-Node* AstGraphBuilder::BuildPatchReceiverToGlobalProxy(Node* receiver) {
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object). Otherwise there is nothing left to do here.
- if (info()->strict_mode() != SLOPPY || info()->is_native()) return receiver;
-
- // There is no need to perform patching if the receiver is never used. Note
- // that scope predicates are purely syntactical, a call to eval might still
- // inspect the receiver value.
- if (!info()->scope()->uses_this() && !info()->scope()->inner_uses_this() &&
- !info()->scope()->calls_sloppy_eval()) {
- return receiver;
- }
-
- IfBuilder receiver_check(this);
- Node* undefined = jsgraph()->UndefinedConstant();
- Node* check = NewNode(javascript()->StrictEqual(), receiver, undefined);
- receiver_check.If(check);
- receiver_check.Then();
- environment()->Push(BuildLoadGlobalProxy());
- receiver_check.Else();
- environment()->Push(receiver);
- receiver_check.End();
- return environment()->Pop();
-}
-
-
-Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) {
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots <= 0) return context;
+Node* AstGraphBuilder::BuildLocalActivationContext(Node* context) {
+ Scope* scope = info()->scope();
// Allocate a new local context.
- const Operator* op = javascript()->CreateFunctionContext();
- Node* local_context = NewNode(op, closure);
- set_current_context(local_context);
+ Node* local_context = scope->is_script_scope()
+ ? BuildLocalScriptContext(scope)
+ : BuildLocalFunctionContext(scope);
+
+ if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
+ Node* receiver = environment()->RawParameterLookup(0);
+ // Context variable (at bottom of the context chain).
+ Variable* variable = scope->receiver();
+ DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
+ const Operator* op = javascript()->StoreContext(0, variable->index());
+ NewNode(op, local_context, receiver);
+ }
// Copy parameters into context if necessary.
- int num_parameters = info()->scope()->num_parameters();
+ int num_parameters = scope->num_parameters();
for (int i = 0; i < num_parameters; i++) {
- Variable* variable = info()->scope()->parameter(i);
+ Variable* variable = scope->parameter(i);
if (!variable->IsContextSlot()) continue;
- // Temporary parameter node. The parameter indices are shifted by 1
- // (receiver is parameter index -1 but environment index 0).
- Node* parameter = NewNode(common()->Parameter(i + 1), graph()->start());
+ Node* parameter = environment()->RawParameterLookup(i + 1);
// Context variable (at bottom of the context chain).
- DCHECK_EQ(0, info()->scope()->ContextChainLength(variable->scope()));
+ DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
const Operator* op = javascript()->StoreContext(0, variable->index());
NewNode(op, local_context, parameter);
}
@@ -1811,25 +3167,145 @@
}
+Node* AstGraphBuilder::BuildLocalFunctionContext(Scope* scope) {
+ DCHECK(scope->is_function_scope());
+
+ // Allocate a new local context.
+ int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ const Operator* op = javascript()->CreateFunctionContext(slot_count);
+ Node* local_context = NewNode(op, GetFunctionClosure());
+
+ return local_context;
+}
+
+
+Node* AstGraphBuilder::BuildLocalScriptContext(Scope* scope) {
+ DCHECK(scope->is_script_scope());
+
+ // Allocate a new local context.
+ Handle<ScopeInfo> scope_info = scope->GetScopeInfo(isolate());
+ const Operator* op = javascript()->CreateScriptContext(scope_info);
+ Node* local_context = NewNode(op, GetFunctionClosure());
+ PrepareFrameState(local_context, BailoutId::ScriptContext(),
+ OutputFrameStateCombine::Push());
+
+ return local_context;
+}
+
+
+Node* AstGraphBuilder::BuildLocalBlockContext(Scope* scope) {
+ DCHECK(scope->is_block_scope());
+
+ // Allocate a new local context.
+ Handle<ScopeInfo> scope_info = scope->GetScopeInfo(isolate());
+ const Operator* op = javascript()->CreateBlockContext(scope_info);
+ Node* local_context = NewNode(op, GetFunctionClosureForContext());
+
+ return local_context;
+}
+
+
Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
- if (arguments == NULL) return NULL;
+ if (arguments == nullptr) return nullptr;
// Allocate and initialize a new arguments object.
- Node* callee = GetFunctionClosure();
- const Operator* op = javascript()->CallRuntime(Runtime::kNewArguments, 1);
- Node* object = NewNode(op, callee);
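+ // Sloppy-mode functions with simple parameters get mapped arguments, where
+ // writes such as arguments[0] = x are reflected in the first parameter;
+ // all other functions get the unmapped variant.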
+ CreateArgumentsParameters::Type type =
+ is_strict(language_mode()) || !info()->has_simple_parameters()
+ ? CreateArgumentsParameters::kUnmappedArguments
+ : CreateArgumentsParameters::kMappedArguments;
+ const Operator* op = javascript()->CreateArguments(type, 0);
+ Node* object = NewNode(op, GetFunctionClosure());
+ PrepareFrameState(object, BailoutId::None());
- // Assign the object to the arguments variable.
+ // Assign the object to the {arguments} variable. This should never lazy
+ // deopt, so it is fine to use an invalid bailout id.
DCHECK(arguments->IsContextSlot() || arguments->IsStackAllocated());
- // This should never lazy deopt, so it is fine to send invalid bailout id.
- BuildVariableAssignment(arguments, object, Token::ASSIGN, BailoutId::None());
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ BuildVariableAssignment(arguments, object, Token::ASSIGN, VectorSlotPair(),
+ BailoutId::None(), states);
+ return object;
+}
+
+Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest, int index) {
+ if (rest == nullptr) return nullptr;
+
+ // Allocate and initialize a new arguments object.
+ CreateArgumentsParameters::Type type = CreateArgumentsParameters::kRestArray;
+ const Operator* op = javascript()->CreateArguments(type, index);
+ Node* object = NewNode(op, GetFunctionClosure());
+ PrepareFrameState(object, BailoutId::None());
+
+ // Assign the object to the {rest} variable. This should never lazy
+ // deopt, so it is fine to use an invalid bailout id.
+ DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ BuildVariableAssignment(rest, object, Token::ASSIGN, VectorSlotPair(),
+ BailoutId::None(), states);
+ return object;
+}
+
+
+Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
+ if (this_function_var == nullptr) return nullptr;
+
+ // Retrieve the closure we were called with.
+ Node* this_function = GetFunctionClosure();
+
+ // Assign the object to the {.this_function} variable. This should never lazy
+ // deopt, so it is fine to use an invalid bailout id.
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ BuildVariableAssignment(this_function_var, this_function, Token::INIT,
+ VectorSlotPair(), BailoutId::None(), states);
+ return this_function;
+}
+
+
+Node* AstGraphBuilder::BuildNewTargetVariable(Variable* new_target_var) {
+ if (new_target_var == nullptr) return nullptr;
+
+ // Retrieve the new target we were called with.
+ Node* object = GetNewTarget();
+
+ // Assign the object to the {new.target} variable. This should never lazy
+ // deopt, so it is fine to use an invalid bailout id.
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ BuildVariableAssignment(new_target_var, object, Token::INIT, VectorSlotPair(),
+ BailoutId::None(), states);
return object;
}
Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole,
Node* not_hole) {
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+ return NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ check, for_hole, not_hole);
+}
+
+
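+// Checks {value} against the hole: throws a ReferenceError if it matches and
+// produces {not_hole} otherwise. This implements TDZ semantics, e.g. reading
+// a let-declared variable before its declaration has executed.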
+Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
+ Node* not_hole,
+ BailoutId bailout_id) {
+ IfBuilder hole_check(this);
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+ hole_check.If(check);
+ hole_check.Then();
+ Node* error = BuildThrowReferenceError(variable, bailout_id);
+ environment()->Push(error);
+ hole_check.Else();
+ environment()->Push(not_hole);
+ hole_check.End();
+ return environment()->Pop();
+}
+
+
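+// Dual of the above: throws a ReferenceError unless {value} is the hole, in
+// which case {for_hole} is produced. Used to reject re-initialization of an
+// already bound {this}, e.g. a second super() call in a constructor.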
+Node* AstGraphBuilder::BuildHoleCheckElseThrow(Node* value, Variable* variable,
+ Node* for_hole,
+ BailoutId bailout_id) {
IfBuilder hole_check(this);
Node* the_hole = jsgraph()->TheHoleConstant();
Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
@@ -1837,47 +3313,50 @@
hole_check.Then();
environment()->Push(for_hole);
hole_check.Else();
- environment()->Push(not_hole);
+ Node* error = BuildThrowReferenceError(variable, bailout_id);
+ environment()->Push(error);
hole_check.End();
return environment()->Pop();
}
-Node* AstGraphBuilder::BuildHoleCheckThrow(Node* value, Variable* variable,
- Node* not_hole,
- BailoutId bailout_id) {
- IfBuilder hole_check(this);
- Node* the_hole = jsgraph()->TheHoleConstant();
- Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
- hole_check.If(check);
- hole_check.Then();
- environment()->Push(BuildThrowReferenceError(variable, bailout_id));
- hole_check.Else();
- environment()->Push(not_hole);
- hole_check.End();
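+// Throws when a computed static member name evaluates to "prototype", e.g.
+// class C { static ["pro" + "totype"]() {} } must fail at runtime.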
+Node* AstGraphBuilder::BuildThrowIfStaticPrototype(Node* name,
+ BailoutId bailout_id) {
+ IfBuilder prototype_check(this);
+ Node* prototype_string =
+ jsgraph()->Constant(isolate()->factory()->prototype_string());
+ Node* check = NewNode(javascript()->StrictEqual(), name, prototype_string);
+ prototype_check.If(check);
+ prototype_check.Then();
+ Node* error = BuildThrowStaticPrototypeError(bailout_id);
+ environment()->Push(error);
+ prototype_check.Else();
+ environment()->Push(name);
+ prototype_check.End();
return environment()->Pop();
}
Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states,
const VectorSlotPair& feedback,
- ContextualMode contextual_mode) {
+ OutputFrameStateCombine combine,
+ TypeofMode typeof_mode) {
Node* the_hole = jsgraph()->TheHoleConstant();
VariableMode mode = variable->mode();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
- Node* global = BuildLoadGlobalObject();
- Unique<Name> name = MakeUnique(variable->name());
- const Operator* op =
- javascript()->LoadNamed(name, feedback, contextual_mode);
- Node* node = NewNode(op, global);
- PrepareFrameState(node, bailout_id, OutputFrameStateCombine::Push());
- return node;
+ Handle<Name> name = variable->name();
+ if (Node* node = TryLoadGlobalConstant(name)) return node;
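+ // (Unchangeable globals, e.g. undefined, NaN and Infinity, can be folded
+ // to constants here.)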
+ Node* value = BuildGlobalLoad(name, feedback, typeof_mode);
+ states.AddToNode(value, bailout_id, combine);
+ return value;
}
- case Variable::PARAMETER:
- case Variable::LOCAL: {
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
// Local var, const, or let variable.
Node* value = environment()->Lookup(variable);
if (mode == CONST_LEGACY) {
@@ -1893,12 +3372,12 @@
if (value->op() == the_hole->op()) {
value = BuildThrowReferenceError(variable, bailout_id);
} else if (value->opcode() == IrOpcode::kPhi) {
- value = BuildHoleCheckThrow(value, variable, value, bailout_id);
+ value = BuildHoleCheckThenThrow(value, variable, value, bailout_id);
}
}
return value;
}
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
// Context variable (potentially up the context chain).
int depth = current_scope()->ContextChainLength(variable->scope());
bool immutable = variable->maybe_assigned() == kNotAssigned;
@@ -1914,318 +3393,974 @@
value = BuildHoleCheckSilent(value, undefined, value);
} else if (mode == LET || mode == CONST) {
// Perform check for uninitialized let/const variables.
- value = BuildHoleCheckThrow(value, variable, value, bailout_id);
+ value = BuildHoleCheckThenThrow(value, variable, value, bailout_id);
}
return value;
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
- Node* name = jsgraph()->Constant(variable->name());
- Runtime::FunctionId function_id =
- (contextual_mode == CONTEXTUAL)
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
- const Operator* op = javascript()->CallRuntime(function_id, 2);
- Node* pair = NewNode(op, current_context(), name);
- PrepareFrameState(pair, bailout_id, OutputFrameStateCombine::Push(1));
- return NewNode(common()->Projection(0), pair);
+ Handle<String> name = variable->name();
+ if (Node* node =
+ TryLoadDynamicVariable(variable, name, bailout_id, states,
+ feedback, combine, typeof_mode)) {
+ return node;
+ }
+ const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
+ Node* value = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ states.AddToNode(value, bailout_id, combine);
+ return value;
}
}
UNREACHABLE();
- return NULL;
+ return nullptr;
}
-Node* AstGraphBuilder::BuildVariableDelete(
- Variable* variable, BailoutId bailout_id,
- OutputFrameStateCombine state_combine) {
+Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
+ BailoutId bailout_id,
+ OutputFrameStateCombine combine) {
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
Node* name = jsgraph()->Constant(variable->name());
- const Operator* op = javascript()->DeleteProperty(strict_mode());
+ const Operator* op = javascript()->DeleteProperty(language_mode());
Node* result = NewNode(op, global, name);
- PrepareFrameState(result, bailout_id, state_combine);
+ PrepareFrameState(result, bailout_id, combine);
return result;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
// Local var, const, or let variable or context variable.
- return jsgraph()->BooleanConstant(variable->is_this());
- case Variable::LOOKUP: {
+ return jsgraph()->BooleanConstant(variable->HasThisName(isolate()));
+ }
+ case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
Node* name = jsgraph()->Constant(variable->name());
const Operator* op =
javascript()->CallRuntime(Runtime::kDeleteLookupSlot, 2);
Node* result = NewNode(op, current_context(), name);
- PrepareFrameState(result, bailout_id, state_combine);
+ PrepareFrameState(result, bailout_id, combine);
return result;
}
}
UNREACHABLE();
- return NULL;
+ return nullptr;
}
Node* AstGraphBuilder::BuildVariableAssignment(
- Variable* variable, Node* value, Token::Value op, BailoutId bailout_id,
- OutputFrameStateCombine combine) {
+ Variable* variable, Node* value, Token::Value op,
+ const VectorSlotPair& feedback, BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states, OutputFrameStateCombine combine) {
Node* the_hole = jsgraph()->TheHoleConstant();
VariableMode mode = variable->mode();
switch (variable->location()) {
- case Variable::UNALLOCATED: {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
- Node* global = BuildLoadGlobalObject();
- Unique<Name> name = MakeUnique(variable->name());
- const Operator* op = javascript()->StoreNamed(strict_mode(), name);
- Node* store = NewNode(op, global, value);
- PrepareFrameState(store, bailout_id, combine);
+ Handle<Name> name = variable->name();
+ Node* store = BuildGlobalStore(name, value, feedback);
+ states.AddToNode(store, bailout_id, combine);
return store;
}
- case Variable::PARAMETER:
- case Variable::LOCAL:
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
// Local var, const, or let variable.
- if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+ if (mode == CONST_LEGACY && op == Token::INIT) {
// Perform an initialization check for legacy const variables.
Node* current = environment()->Lookup(variable);
if (current->op() != the_hole->op()) {
value = BuildHoleCheckSilent(current, value, current);
}
- } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
- // Non-initializing assignments to legacy const is
+ } else if (mode == CONST_LEGACY && op != Token::INIT) {
+ // Non-initializing assignment to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode())) {
return BuildThrowConstAssignError(bailout_id);
}
return value;
- } else if (mode == LET && op != Token::INIT_LET) {
+ } else if (mode == LET && op == Token::INIT) {
+ // No initialization check needed because scoping guarantees it. Note
+ // that we still perform a lookup to keep the variable live, because
+ // baseline code might contain debug code that inspects the variable.
+ Node* current = environment()->Lookup(variable);
+ CHECK_NOT_NULL(current);
+ } else if (mode == LET && op != Token::INIT) {
// Perform an initialization check for let declared variables.
- // Also note that the dynamic hole-check is only done to ensure that
- // this does not break in the presence of do-expressions within the
- // temporal dead zone of a let declared variable.
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
- value = BuildThrowReferenceError(variable, bailout_id);
- } else if (value->opcode() == IrOpcode::kPhi) {
- value = BuildHoleCheckThrow(current, variable, value, bailout_id);
+ return BuildThrowReferenceError(variable, bailout_id);
+ } else if (current->opcode() == IrOpcode::kPhi) {
+ BuildHoleCheckThenThrow(current, variable, value, bailout_id);
}
- } else if (mode == CONST && op != Token::INIT_CONST) {
- // Non-initializing assignments to const is exception in all modes.
+ } else if (mode == CONST && op == Token::INIT) {
+ // Perform an initialization check for const {this} variables.
+ // Note that the {this} variable is the only const variable that is able
+ // to trigger bind operations outside the TDZ, via {super} calls.
+ Node* current = environment()->Lookup(variable);
+ if (current->op() != the_hole->op() && variable->is_this()) {
+ value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
+ }
+ } else if (mode == CONST && op != Token::INIT) {
+ // Assignment to const throws an exception in all modes.
+ Node* current = environment()->Lookup(variable);
+ if (current->op() == the_hole->op()) {
+ return BuildThrowReferenceError(variable, bailout_id);
+ } else if (current->opcode() == IrOpcode::kPhi) {
+ BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ }
return BuildThrowConstAssignError(bailout_id);
}
environment()->Bind(variable, value);
return value;
- case Variable::CONTEXT: {
+ case VariableLocation::CONTEXT: {
// Context variable (potentially up the context chain).
int depth = current_scope()->ContextChainLength(variable->scope());
- if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+ if (mode == CONST_LEGACY && op == Token::INIT) {
// Perform an initialization check for legacy const variables.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op, current_context());
value = BuildHoleCheckSilent(current, value, current);
- } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
- // Non-initializing assignments to legacy const is
+ } else if (mode == CONST_LEGACY && op != Token::INIT) {
+ // Non-initializing assignment to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
- if (strict_mode() == STRICT) {
+ if (is_strict(language_mode())) {
return BuildThrowConstAssignError(bailout_id);
}
return value;
- } else if (mode == LET && op != Token::INIT_LET) {
+ } else if (mode == LET && op != Token::INIT) {
// Perform an initialization check for let declared variables.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op, current_context());
- value = BuildHoleCheckThrow(current, variable, value, bailout_id);
- } else if (mode == CONST && op != Token::INIT_CONST) {
- // Non-initializing assignments to const is exception in all modes.
+ value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ } else if (mode == CONST && op == Token::INIT) {
+ // Perform an initialization check for const {this} variables.
+ // Note that the {this} variable is the only const variable that is able
+ // to trigger bind operations outside the TDZ, via {super} calls.
+ if (variable->is_this()) {
+ const Operator* op =
+ javascript()->LoadContext(depth, variable->index(), false);
+ Node* current = NewNode(op, current_context());
+ value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
+ }
+ } else if (mode == CONST && op != Token::INIT) {
+ // Assignment to const throws an exception in all modes.
+ const Operator* op =
+ javascript()->LoadContext(depth, variable->index(), false);
+ Node* current = NewNode(op, current_context());
+ BuildHoleCheckThenThrow(current, variable, value, bailout_id);
return BuildThrowConstAssignError(bailout_id);
}
const Operator* op = javascript()->StoreContext(depth, variable->index());
return NewNode(op, current_context(), value);
}
- case Variable::LOOKUP: {
+ case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
Node* name = jsgraph()->Constant(variable->name());
- Node* strict = jsgraph()->Constant(strict_mode());
+ Node* language = jsgraph()->Constant(language_mode());
// TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
// initializations of const declarations.
const Operator* op =
javascript()->CallRuntime(Runtime::kStoreLookupSlot, 4);
- Node* store = NewNode(op, value, current_context(), name, strict);
+ Node* store = NewNode(op, value, current_context(), name, language);
PrepareFrameState(store, bailout_id, combine);
return store;
}
}
UNREACHABLE();
- return NULL;
+ return nullptr;
+}
+
+
+Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
+ const VectorSlotPair& feedback) {
+ const Operator* op = javascript()->LoadProperty(language_mode(), feedback);
+ Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
+ return node;
+}
+
+
+Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
+ const VectorSlotPair& feedback) {
+ const Operator* op = javascript()->LoadNamed(language_mode(), name, feedback);
+ Node* node = NewNode(op, object, BuildLoadFeedbackVector());
+ return node;
+}
+
+
+Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
+ const VectorSlotPair& feedback) {
+ const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
+ Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
+ return node;
+}
+
+
+Node* AstGraphBuilder::BuildNamedStore(Node* object, Handle<Name> name,
+ Node* value,
+ const VectorSlotPair& feedback) {
+ const Operator* op =
+ javascript()->StoreNamed(language_mode(), name, feedback);
+ Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+ return node;
+}
+
+
+Node* AstGraphBuilder::BuildNamedSuperLoad(Node* receiver, Node* home_object,
+ Handle<Name> name,
+ const VectorSlotPair& feedback) {
+ Node* name_node = jsgraph()->Constant(name);
+ Node* language = jsgraph()->Constant(language_mode());
+ const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper, 4);
+ Node* node = NewNode(op, receiver, home_object, name_node, language);
+ return node;
+}
+
+
+Node* AstGraphBuilder::BuildKeyedSuperLoad(Node* receiver, Node* home_object,
+ Node* key,
+ const VectorSlotPair& feedback) {
+ Node* language = jsgraph()->Constant(language_mode());
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ Node* node = NewNode(op, receiver, home_object, key, language);
+ return node;
+}
+
+
+Node* AstGraphBuilder::BuildKeyedSuperStore(Node* receiver, Node* home_object,
+ Node* key, Node* value) {
+ Runtime::FunctionId function_id = is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy;
+ const Operator* op = javascript()->CallRuntime(function_id, 4);
+ Node* node = NewNode(op, receiver, home_object, key, value);
+ return node;
+}
+
+
+Node* AstGraphBuilder::BuildNamedSuperStore(Node* receiver, Node* home_object,
+ Handle<Name> name, Node* value) {
+ Node* name_node = jsgraph()->Constant(name);
+ Runtime::FunctionId function_id = is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy;
+ const Operator* op = javascript()->CallRuntime(function_id, 4);
+ Node* node = NewNode(op, receiver, home_object, name_node, value);
+ return node;
+}
+
+
+Node* AstGraphBuilder::BuildGlobalLoad(Handle<Name> name,
+ const VectorSlotPair& feedback,
+ TypeofMode typeof_mode) {
+ const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
+ Node* node = NewNode(op, BuildLoadFeedbackVector());
+ return node;
+}
+
+
+Node* AstGraphBuilder::BuildGlobalStore(Handle<Name> name, Node* value,
+ const VectorSlotPair& feedback) {
+ const Operator* op =
+ javascript()->StoreGlobal(language_mode(), name, feedback);
+ Node* node = NewNode(op, value, BuildLoadFeedbackVector());
+ return node;
}
Node* AstGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
- Node* field_load = NewNode(jsgraph()->machine()->Load(kMachAnyTagged), object,
- jsgraph()->Int32Constant(offset - kHeapObjectTag));
- return field_load;
+ return NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()), object,
+ jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
}
-Node* AstGraphBuilder::BuildLoadBuiltinsObject() {
- Node* global = BuildLoadGlobalObject();
- Node* builtins =
- BuildLoadObjectField(global, JSGlobalObject::kBuiltinsOffset);
- return builtins;
+Node* AstGraphBuilder::BuildLoadImmutableObjectField(Node* object, int offset) {
+ return graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
+ object,
+ jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
+ graph()->start(), graph()->start());
}
Node* AstGraphBuilder::BuildLoadGlobalObject() {
- Node* context = GetFunctionContext();
- const Operator* load_op =
- javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true);
- return NewNode(load_op, context);
+ return BuildLoadNativeContextField(Context::EXTENSION_INDEX);
}
-Node* AstGraphBuilder::BuildLoadGlobalProxy() {
- Node* global = BuildLoadGlobalObject();
- Node* proxy =
- BuildLoadObjectField(global, JSGlobalObject::kGlobalProxyOffset);
- return proxy;
+Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
+ const Operator* op =
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
+ Node* native_context = NewNode(op, current_context());
+ return NewNode(javascript()->LoadContext(0, index, true), native_context);
}
-Node* AstGraphBuilder::BuildToBoolean(Node* input) {
- // TODO(titzer): this should be in a JSOperatorReducer.
- switch (input->opcode()) {
- case IrOpcode::kInt32Constant:
- return jsgraph_->BooleanConstant(!Int32Matcher(input).Is(0));
- case IrOpcode::kFloat64Constant:
- return jsgraph_->BooleanConstant(!Float64Matcher(input).Is(0));
- case IrOpcode::kNumberConstant:
- return jsgraph_->BooleanConstant(!NumberMatcher(input).Is(0));
- case IrOpcode::kHeapConstant: {
- Handle<Object> object = HeapObjectMatcher<Object>(input).Value().handle();
- if (object->IsTrue()) return jsgraph_->TrueConstant();
- if (object->IsFalse()) return jsgraph_->FalseConstant();
- // TODO(turbofan): other constants.
- break;
- }
- default:
- break;
+Node* AstGraphBuilder::BuildLoadFeedbackVector() {
+ if (!feedback_vector_.is_set()) {
+ Node* closure = GetFunctionClosure();
+ Node* shared = BuildLoadImmutableObjectField(
+ closure, JSFunction::kSharedFunctionInfoOffset);
+ Node* vector = BuildLoadImmutableObjectField(
+ shared, SharedFunctionInfo::kFeedbackVectorOffset);
+ feedback_vector_.set(vector);
}
- if (NodeProperties::IsTyped(input)) {
- Type* upper = NodeProperties::GetBounds(input).upper;
- if (upper->Is(Type::Boolean())) return input;
- }
+ return feedback_vector_.get();
+}
- return NewNode(javascript()->ToBoolean(), input);
+
+Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
+ if (Node* node = TryFastToBoolean(input)) return node;
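+ // Without recorded type feedback, conservatively assume any input type.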
+ ToBooleanHints hints;
+ if (!type_hint_analysis_ ||
+ !type_hint_analysis_->GetToBooleanHints(feedback_id, &hints)) {
+ hints = ToBooleanHint::kAny;
+ }
+ return NewNode(javascript()->ToBoolean(hints), input);
+}
+
+
+Node* AstGraphBuilder::BuildToName(Node* input, BailoutId bailout_id) {
+ if (Node* node = TryFastToName(input)) return node;
+ Node* name = NewNode(javascript()->ToName(), input);
+ PrepareFrameState(name, bailout_id);
+ return name;
+}
+
+
+Node* AstGraphBuilder::BuildToObject(Node* input, BailoutId bailout_id) {
+ Node* object = NewNode(javascript()->ToObject(), input);
+ PrepareFrameState(object, bailout_id, OutputFrameStateCombine::Push());
+ return object;
+}
+
+
+Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
+ ObjectLiteralProperty* property,
+ int slot_number) {
+ Expression* expr = property->value();
+ if (!FunctionLiteral::NeedsHomeObject(expr)) return value;
+ Handle<Name> name = isolate()->factory()->home_object_symbol();
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(property->GetSlot(slot_number));
+ Node* store = BuildNamedStore(value, name, home_object, feedback);
+ states.AddToNode(store, BailoutId::None(), OutputFrameStateCombine::Ignore());
+ return store;
+}
+
+
+Node* AstGraphBuilder::BuildThrowError(Node* exception, BailoutId bailout_id) {
+ const Operator* op = javascript()->CallRuntime(Runtime::kThrow, 1);
+ Node* call = NewNode(op, exception);
+ PrepareFrameState(call, bailout_id);
+ Node* control = NewNode(common()->Throw(), call);
+ UpdateControlDependencyToLeaveFunction(control);
+ return call;
}
Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
BailoutId bailout_id) {
- // TODO(mstarzinger): Should be unified with the VisitThrow implementation.
Node* variable_name = jsgraph()->Constant(variable->name());
const Operator* op =
javascript()->CallRuntime(Runtime::kThrowReferenceError, 1);
Node* call = NewNode(op, variable_name);
PrepareFrameState(call, bailout_id);
+ Node* control = NewNode(common()->Throw(), call);
+ UpdateControlDependencyToLeaveFunction(control);
return call;
}
Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
- // TODO(mstarzinger): Should be unified with the VisitThrow implementation.
const Operator* op =
javascript()->CallRuntime(Runtime::kThrowConstAssignError, 0);
Node* call = NewNode(op);
PrepareFrameState(call, bailout_id);
+ Node* control = NewNode(common()->Throw(), call);
+ UpdateControlDependencyToLeaveFunction(control);
return call;
}
-Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
+Node* AstGraphBuilder::BuildThrowStaticPrototypeError(BailoutId bailout_id) {
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kThrowStaticPrototypeError, 0);
+ Node* call = NewNode(op);
+ PrepareFrameState(call, bailout_id);
+ Node* control = NewNode(common()->Throw(), call);
+ UpdateControlDependencyToLeaveFunction(control);
+ return call;
+}
+
+
+Node* AstGraphBuilder::BuildThrowUnsupportedSuperError(BailoutId bailout_id) {
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+ Node* call = NewNode(op);
+ PrepareFrameState(call, bailout_id);
+ Node* control = NewNode(common()->Throw(), call);
+ UpdateControlDependencyToLeaveFunction(control);
+ return call;
+}
+
+
+Node* AstGraphBuilder::BuildReturn(Node* return_value) {
+ Node* control = NewNode(common()->Return(), return_value);
+ UpdateControlDependencyToLeaveFunction(control);
+ return control;
+}
+
+
+Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
+ NewNode(javascript()->CallRuntime(Runtime::kReThrow, 1), exception_value);
+ Node* control = NewNode(common()->Throw(), exception_value);
+ UpdateControlDependencyToLeaveFunction(control);
+ return control;
+}
+
+
+Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
+ TypeFeedbackId feedback_id) {
const Operator* js_op;
+ BinaryOperationHints hints;
+ if (!type_hint_analysis_ ||
+ !type_hint_analysis_->GetBinaryOperationHints(feedback_id, &hints)) {
+ hints = BinaryOperationHints::Any();
+ }
switch (op) {
case Token::BIT_OR:
- js_op = javascript()->BitwiseOr();
+ js_op = javascript()->BitwiseOr(language_mode(), hints);
break;
case Token::BIT_AND:
- js_op = javascript()->BitwiseAnd();
+ js_op = javascript()->BitwiseAnd(language_mode(), hints);
break;
case Token::BIT_XOR:
- js_op = javascript()->BitwiseXor();
+ js_op = javascript()->BitwiseXor(language_mode(), hints);
break;
case Token::SHL:
- js_op = javascript()->ShiftLeft();
+ js_op = javascript()->ShiftLeft(language_mode(), hints);
break;
case Token::SAR:
- js_op = javascript()->ShiftRight();
+ js_op = javascript()->ShiftRight(language_mode(), hints);
break;
case Token::SHR:
- js_op = javascript()->ShiftRightLogical();
+ js_op = javascript()->ShiftRightLogical(language_mode(), hints);
break;
case Token::ADD:
- js_op = javascript()->Add();
+ js_op = javascript()->Add(language_mode(), hints);
break;
case Token::SUB:
- js_op = javascript()->Subtract();
+ js_op = javascript()->Subtract(language_mode(), hints);
break;
case Token::MUL:
- js_op = javascript()->Multiply();
+ js_op = javascript()->Multiply(language_mode(), hints);
break;
case Token::DIV:
- js_op = javascript()->Divide();
+ js_op = javascript()->Divide(language_mode(), hints);
break;
case Token::MOD:
- js_op = javascript()->Modulus();
+ js_op = javascript()->Modulus(language_mode(), hints);
break;
default:
UNREACHABLE();
- js_op = NULL;
+ js_op = nullptr;
}
return NewNode(js_op, left, right);
}
-Node* AstGraphBuilder::BuildStackCheck() {
- IfBuilder stack_check(this);
- Node* limit =
- NewNode(jsgraph()->machine()->Load(kMachPtr),
- jsgraph()->ExternalConstant(
- ExternalReference::address_of_stack_limit(isolate())),
- jsgraph()->ZeroConstant());
- Node* stack = NewNode(jsgraph()->machine()->LoadStackPointer());
- Node* tag = NewNode(jsgraph()->machine()->UintLessThan(), limit, stack);
- stack_check.If(tag, BranchHint::kTrue);
- stack_check.Then();
- stack_check.Else();
- Node* guard = NewNode(javascript()->CallRuntime(Runtime::kStackGuard, 0));
- stack_check.End();
- return guard;
+Node* AstGraphBuilder::TryLoadGlobalConstant(Handle<Name> name) {
+ // Optimize global constants like "undefined", "Infinity", and "NaN".
+ Handle<Object> constant_value = isolate()->factory()->GlobalConstantFor(name);
+ if (!constant_value.is_null()) return jsgraph()->Constant(constant_value);
+ return nullptr;
+}
+
+
+Node* AstGraphBuilder::TryLoadDynamicVariable(
+ Variable* variable, Handle<String> name, BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states, const VectorSlotPair& feedback,
+ OutputFrameStateCombine combine, TypeofMode typeof_mode) {
+ VariableMode mode = variable->mode();
+
+ if (mode == DYNAMIC_GLOBAL) {
+ uint32_t bitset = ComputeBitsetForDynamicGlobal(variable);
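+ // {kFullCheckRequired} signals that the potentially shadowing contexts
+ // cannot be enumerated, so only the fully dynamic lookup is applicable.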
+ if (bitset == kFullCheckRequired) return nullptr;
+
+ // We are using two blocks to model fast and slow cases.
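+ // The generated control flow is roughly:
+ //
+ //   if (<no extension object shadows the variable>) {
+ //     result = LoadGlobal(name)   // fast path
+ //   } else {
+ //     result = LoadDynamic(name)  // slow path
+ //   }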
+ BlockBuilder fast_block(this);
+ BlockBuilder slow_block(this);
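+ // The end of {slow_block} is reached with the result already pushed onto
+ // the operand stack, so capture its environment with a placeholder value.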
+ environment()->Push(jsgraph()->TheHoleConstant());
+ slow_block.BeginBlock();
+ environment()->Pop();
+ fast_block.BeginBlock();
+
+ // Check whether the fast path applies by verifying, for every context
+ // depth marked in {bitset}, that no extension object has been introduced
+ // that might shadow the optimistic declaration.
+ for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
+ if ((bitset & 1) == 0) continue;
+ Node* load = NewNode(
+ javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
+ current_context());
+ Node* check = NewNode(javascript()->StrictEqual(), load,
+ jsgraph()->TheHoleConstant());
+ fast_block.BreakUnless(check, BranchHint::kTrue);
+ }
+
+ // Fast case, because the variable is not shadowed. Perform a global slot load.
+ Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
+ states.AddToNode(fast, bailout_id, combine);
+ environment()->Push(fast);
+ slow_block.Break();
+ environment()->Pop();
+ fast_block.EndBlock();
+
+ // Slow case, because the variable may be shadowed. Perform a dynamic lookup.
+ const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
+ Node* slow = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ states.AddToNode(slow, bailout_id, combine);
+ environment()->Push(slow);
+ slow_block.EndBlock();
+
+ return environment()->Pop();
+ }
+
+ if (mode == DYNAMIC_LOCAL) {
+ uint32_t bitset = ComputeBitsetForDynamicContext(variable);
+ if (bitset == kFullCheckRequired) return nullptr;
+
+ // We are using two blocks to model fast and slow cases.
+ BlockBuilder fast_block(this);
+ BlockBuilder slow_block(this);
+ environment()->Push(jsgraph()->TheHoleConstant());
+ slow_block.BeginBlock();
+ environment()->Pop();
+ fast_block.BeginBlock();
+
+ // Check whether the fast path applies by verifying, for every context
+ // depth marked in {bitset}, that no extension object has been introduced
+ // that might shadow the optimistic declaration.
+ for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
+ if ((bitset & 1) == 0) continue;
+ Node* load = NewNode(
+ javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
+ current_context());
+ Node* check = NewNode(javascript()->StrictEqual(), load,
+ jsgraph()->TheHoleConstant());
+ fast_block.BreakUnless(check, BranchHint::kTrue);
+ }
+
+ // Fast case, because the variable is not shadowed. Perform a context slot load.
+ Variable* local = variable->local_if_not_shadowed();
+ DCHECK(local->location() == VariableLocation::CONTEXT); // Must be context.
+ Node* fast = BuildVariableLoad(local, bailout_id, states, feedback, combine,
+ typeof_mode);
+ environment()->Push(fast);
+ slow_block.Break();
+ environment()->Pop();
+ fast_block.EndBlock();
+
+ // Slow case, because the variable may be shadowed. Perform a dynamic lookup.
+ const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
+ Node* slow = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ states.AddToNode(slow, bailout_id, combine);
+ environment()->Push(slow);
+ slow_block.EndBlock();
+
+ return environment()->Pop();
+ }
+
+ return nullptr;
+}
+
+
+Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
+ switch (input->opcode()) {
+ case IrOpcode::kNumberConstant: {
+ NumberMatcher m(input);
+ return jsgraph_->BooleanConstant(!m.Is(0) && !m.IsNaN());
+ }
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> object = HeapObjectMatcher(input).Value();
+ return jsgraph_->BooleanConstant(object->BooleanValue());
+ }
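+ // These operators are known to produce boolean values, so the conversion
+ // is a no-op.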
+ case IrOpcode::kJSEqual:
+ case IrOpcode::kJSNotEqual:
+ case IrOpcode::kJSStrictEqual:
+ case IrOpcode::kJSStrictNotEqual:
+ case IrOpcode::kJSLessThan:
+ case IrOpcode::kJSLessThanOrEqual:
+ case IrOpcode::kJSGreaterThan:
+ case IrOpcode::kJSGreaterThanOrEqual:
+ case IrOpcode::kJSToBoolean:
+ case IrOpcode::kJSDeleteProperty:
+ case IrOpcode::kJSHasProperty:
+ case IrOpcode::kJSInstanceOf:
+ return input;
+ default:
+ break;
+ }
+ return nullptr;
+}
+
+
+Node* AstGraphBuilder::TryFastToName(Node* input) {
+ switch (input->opcode()) {
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> object = HeapObjectMatcher(input).Value();
+ if (object->IsName()) return input;
+ break;
+ }
+ case IrOpcode::kJSToString:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSTypeOf:
+ return input;
+ default:
+ break;
+ }
+ return nullptr;
+}
+
+
+bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
+ if (info()->osr_ast_id() == stmt->OsrEntryId()) {
+ info()->set_osr_expr_stack_height(std::max(
+ environment()->stack_height(), info()->osr_expr_stack_height()));
+ return true;
+ }
+ return false;
}
void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
OutputFrameStateCombine combine) {
- if (OperatorProperties::HasFrameStateInput(node->op())) {
- DCHECK(NodeProperties::GetFrameStateInput(node)->opcode() ==
- IrOpcode::kDead);
+ if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
NodeProperties::ReplaceFrameStateInput(
- node, environment()->Checkpoint(ast_id, combine));
+ node, 0, environment()->Checkpoint(ast_id, combine));
}
}
BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
IterationStatement* stmt) {
- if (loop_assignment_analysis_ == NULL) return NULL;
+ if (loop_assignment_analysis_ == nullptr) return nullptr;
return loop_assignment_analysis_->GetVariablesAssignedInLoop(stmt);
}
+
+Node** AstGraphBuilder::EnsureInputBufferSize(int size) {
+ if (size > input_buffer_size_) {
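+ // Over-allocate so that subsequent growth rarely reallocates. The old
+ // buffer is simply abandoned; it is reclaimed together with the zone.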
+ size = size + kInputBufferSizeIncrement + input_buffer_size_;
+ input_buffer_ = local_zone()->NewArray<Node*>(size);
+ input_buffer_size_ = size;
+ }
+ return input_buffer_;
+}
+
+
+Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
+ Node** value_inputs, bool incomplete) {
+ DCHECK_EQ(op->ValueInputCount(), value_input_count);
+
+ bool has_context = OperatorProperties::HasContextInput(op);
+ int frame_state_count = OperatorProperties::GetFrameStateInputCount(op);
+ bool has_control = op->ControlInputCount() == 1;
+ bool has_effect = op->EffectInputCount() == 1;
+
+ DCHECK(op->ControlInputCount() < 2);
+ DCHECK(op->EffectInputCount() < 2);
+
+ Node* result = nullptr;
+ if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
+ result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
+ } else {
+ bool inside_try_scope = try_nesting_level_ > 0;
+ int input_count_with_deps = value_input_count;
+ if (has_context) ++input_count_with_deps;
+ input_count_with_deps += frame_state_count;
+ if (has_control) ++input_count_with_deps;
+ if (has_effect) ++input_count_with_deps;
+ Node** buffer = EnsureInputBufferSize(input_count_with_deps);
+ memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+ Node** current_input = buffer + value_input_count;
+ if (has_context) {
+ *current_input++ = current_context();
+ }
+ for (int i = 0; i < frame_state_count; i++) {
+ // The frame state will be inserted later. Here we misuse
+ // the {Dead} node as a sentinel to be later overwritten
+ // with the real frame state.
+ *current_input++ = jsgraph()->Dead();
+ }
+ if (has_effect) {
+ *current_input++ = environment_->GetEffectDependency();
+ }
+ if (has_control) {
+ *current_input++ = environment_->GetControlDependency();
+ }
+ result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
+ if (!environment()->IsMarkedAsUnreachable()) {
+ // Update the current control dependency for control-producing nodes.
+ if (NodeProperties::IsControl(result)) {
+ environment_->UpdateControlDependency(result);
+ }
+ // Update the current effect dependency for effect-producing nodes.
+ if (result->op()->EffectOutputCount() > 0) {
+ environment_->UpdateEffectDependency(result);
+ }
+ // Add implicit exception continuation for throwing nodes.
+ if (!result->op()->HasProperty(Operator::kNoThrow) && inside_try_scope) {
+ // Conservative prediction whether caught locally.
+ IfExceptionHint hint = try_catch_nesting_level_ > 0
+ ? IfExceptionHint::kLocallyCaught
+ : IfExceptionHint::kLocallyUncaught;
+ // Copy the environment for the success continuation.
+ Environment* success_env = environment()->CopyForConditional();
+ const Operator* op = common()->IfException(hint);
+ Node* effect = environment()->GetEffectDependency();
+ Node* on_exception = graph()->NewNode(op, effect, result);
+ environment_->UpdateControlDependency(on_exception);
+ environment_->UpdateEffectDependency(on_exception);
+ execution_control()->ThrowValue(on_exception);
+ set_environment(success_env);
+ }
+ // Add implicit success continuation for throwing nodes.
+ if (!result->op()->HasProperty(Operator::kNoThrow)) {
+ const Operator* op = common()->IfSuccess();
+ Node* on_success = graph()->NewNode(op, result);
+ environment_->UpdateControlDependency(on_success);
+ }
+ }
+ }
+
+ return result;
+}
+
+
+void AstGraphBuilder::UpdateControlDependencyToLeaveFunction(Node* exit) {
+ if (environment()->IsMarkedAsUnreachable()) return;
+ environment()->MarkAsUnreachable();
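+ // Collected exit controls are merged into the graph's {End} node once the
+ // entire function body has been visited.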
+ exit_controls_.push_back(exit);
+}
+
+
+void AstGraphBuilder::Environment::Merge(Environment* other) {
+ DCHECK(values_.size() == other->values_.size());
+ DCHECK(contexts_.size() == other->contexts_.size());
+
+ // Nothing to do if the other environment is dead.
+ if (other->IsMarkedAsUnreachable()) return;
+
+ // Resurrect a dead environment by copying the contents of the other one and
+ // placing a singleton merge as the new control dependency.
+ if (this->IsMarkedAsUnreachable()) {
+ Node* other_control = other->control_dependency_;
+ Node* inputs[] = {other_control};
+ control_dependency_ =
+ graph()->NewNode(common()->Merge(1), arraysize(inputs), inputs, true);
+ effect_dependency_ = other->effect_dependency_;
+ values_ = other->values_;
+ contexts_ = other->contexts_;
+ if (IsLivenessAnalysisEnabled()) {
+ liveness_block_ =
+ builder_->liveness_analyzer()->NewBlock(other->liveness_block());
+ }
+ return;
+ }
+
+ // Record the merge for the local variable liveness calculation.
+ // For loops, we are connecting a back edge into the existing block;
+ // for merges, we create a new merged block.
+ if (IsLivenessAnalysisEnabled()) {
+ if (GetControlDependency()->opcode() != IrOpcode::kLoop) {
+ liveness_block_ =
+ builder_->liveness_analyzer()->NewBlock(liveness_block());
+ }
+ liveness_block()->AddPredecessor(other->liveness_block());
+ }
+
+ // Create a merge of the control dependencies of both environments and update
+ // the current environment's control dependency accordingly.
+ Node* control = builder_->MergeControl(this->GetControlDependency(),
+ other->GetControlDependency());
+ UpdateControlDependency(control);
+
+ // Create a merge of the effect dependencies of both environments and update
+ // the current environment's effect dependency accordingly.
+ Node* effect = builder_->MergeEffect(this->GetEffectDependency(),
+ other->GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+
+ // Introduce Phi nodes for values that have differing input at merge points,
+ // potentially extending an existing Phi node if possible.
+ for (int i = 0; i < static_cast<int>(values_.size()); ++i) {
+ values_[i] = builder_->MergeValue(values_[i], other->values_[i], control);
+ }
+ for (int i = 0; i < static_cast<int>(contexts_.size()); ++i) {
+ contexts_[i] =
+ builder_->MergeValue(contexts_[i], other->contexts_[i], control);
+ }
+}
+
+
+void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned,
+ bool is_osr) {
+ int size = static_cast<int>(values()->size());
+
+ Node* control = builder_->NewLoop();
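+ // Phis are created with a single input, the value at loop entry; inputs
+ // for the back edge are appended later (via MergeControl for the loop,
+ // MergeValue and MergeEffect for the phis) when the loop is closed.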
+ if (assigned == nullptr) {
+ // Assume that everything is updated in the loop.
+ for (int i = 0; i < size; ++i) {
+ values()->at(i) = builder_->NewPhi(1, values()->at(i), control);
+ }
+ } else {
+ // Only build phis for those locals assigned in this loop.
+ for (int i = 0; i < size; ++i) {
+ if (i < assigned->length() && !assigned->Contains(i)) continue;
+ Node* phi = builder_->NewPhi(1, values()->at(i), control);
+ values()->at(i) = phi;
+ }
+ }
+ Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+
+ // Connect the loop to end via Terminate if it's not marked as unreachable.
+ if (!IsMarkedAsUnreachable()) {
+ // Connect the Loop node to end via a Terminate node.
+ Node* terminate = builder_->graph()->NewNode(
+ builder_->common()->Terminate(), effect, control);
+ builder_->exit_controls_.push_back(terminate);
+ }
+
+ if (builder_->info()->is_osr()) {
+ // Introduce phis for all context values in the case of an OSR graph.
+ for (size_t i = 0; i < contexts()->size(); ++i) {
+ Node* context = contexts()->at(i);
+ contexts()->at(i) = builder_->NewPhi(1, context, control);
+ }
+ }
+
+ if (is_osr) {
+ // Merge OSR values as inputs to the phis of the loop.
+ Graph* graph = builder_->graph();
+ Node* osr_loop_entry = builder_->graph()->NewNode(
+ builder_->common()->OsrLoopEntry(), graph->start(), graph->start());
+
+ builder_->MergeControl(control, osr_loop_entry);
+ builder_->MergeEffect(effect, osr_loop_entry, control);
+
+ for (int i = 0; i < size; ++i) {
+ Node* value = values()->at(i);
+ Node* osr_value =
+ graph->NewNode(builder_->common()->OsrValue(i), osr_loop_entry);
+ values()->at(i) = builder_->MergeValue(value, osr_value, control);
+ }
+
+ // Rename all the contexts in the environment.
+ // The innermost context is the OSR value, and the outer contexts are
+ // reconstructed by dynamically walking up the context chain.
+ Node* osr_context = nullptr;
+ const Operator* op =
+ builder_->javascript()->LoadContext(0, Context::PREVIOUS_INDEX, true);
+ const Operator* op_inner =
+ builder_->common()->OsrValue(Linkage::kOsrContextSpillSlotIndex);
+ int last = static_cast<int>(contexts()->size() - 1);
+ for (int i = last; i >= 0; i--) {
+ Node* context = contexts()->at(i);
+ osr_context = (i == last) ? graph->NewNode(op_inner, osr_loop_entry)
+ : graph->NewNode(op, osr_context, osr_context,
+ osr_loop_entry);
+ contexts()->at(i) = builder_->MergeValue(context, osr_context, control);
+ }
+ }
+}
+
+
+Node* AstGraphBuilder::NewPhi(int count, Node* input, Node* control) {
+ const Operator* phi_op = common()->Phi(MachineRepresentation::kTagged, count);
+ Node** buffer = EnsureInputBufferSize(count + 1);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer, true);
+}
+
+
+// TODO(mstarzinger): Revisit this once we have proper effect states.
+Node* AstGraphBuilder::NewEffectPhi(int count, Node* input, Node* control) {
+ const Operator* phi_op = common()->EffectPhi(count);
+ Node** buffer = EnsureInputBufferSize(count + 1);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer, true);
+}
+
+
+Node* AstGraphBuilder::MergeControl(Node* control, Node* other) {
+ int inputs = control->op()->ControlInputCount() + 1;
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Control node for loop exists, add input.
+ const Operator* op = common()->Loop(inputs);
+ control->AppendInput(graph_zone(), other);
+ NodeProperties::ChangeOp(control, op);
+ } else if (control->opcode() == IrOpcode::kMerge) {
+ // Control node for merge exists, add input.
+ const Operator* op = common()->Merge(inputs);
+ control->AppendInput(graph_zone(), other);
+ NodeProperties::ChangeOp(control, op);
+ } else {
+ // Control node is a singleton, introduce a merge.
+ const Operator* op = common()->Merge(inputs);
+ Node* inputs[] = {control, other};
+ control = graph()->NewNode(op, arraysize(inputs), inputs, true);
+ }
+ return control;
+}
+
+
+Node* AstGraphBuilder::MergeEffect(Node* value, Node* other, Node* control) {
+ int inputs = control->op()->ControlInputCount();
+ if (value->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->InsertInput(graph_zone(), inputs - 1, other);
+ NodeProperties::ChangeOp(value, common()->EffectPhi(inputs));
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewEffectPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
+
+Node* AstGraphBuilder::MergeValue(Node* value, Node* other, Node* control) {
+ int inputs = control->op()->ControlInputCount();
+ if (value->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->InsertInput(graph_zone(), inputs - 1, other);
+ NodeProperties::ChangeOp(
+ value, common()->Phi(MachineRepresentation::kTagged, inputs));
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 0337c81..3b6302d 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -5,133 +5,87 @@
#ifndef V8_COMPILER_AST_GRAPH_BUILDER_H_
#define V8_COMPILER_AST_GRAPH_BUILDER_H_
-#include "src/v8.h"
-
-#include "src/ast.h"
-#include "src/compiler/graph-builder.h"
+#include "src/ast/ast.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/liveness-analyzer.h"
+#include "src/compiler/state-values-utils.h"
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class BitVector;
+
+
namespace compiler {
+// Forward declarations.
class ControlBuilder;
class Graph;
class LoopAssignmentAnalysis;
class LoopBuilder;
+class Node;
+class TypeHintAnalysis;
+
// The AstGraphBuilder produces a high-level IR graph, based on an
// underlying AST. The produced graph can either be compiled into a
// stand-alone function or be wired into another graph for the purposes
// of function inlining.
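+//
+// A minimal usage sketch (hypothetical surrounding pipeline; the zone,
+// compilation info and JSGraph are assumed to be set up by the caller):
+//
+//   AstGraphBuilder builder(local_zone, info, jsgraph);
+//   if (!builder.CreateGraph()) { /* handle bailout */ }
+//   // On success, the graph is available through {jsgraph}.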
-class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
+class AstGraphBuilder : public AstVisitor {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
- LoopAssignmentAnalysis* loop_assignment = NULL);
+ LoopAssignmentAnalysis* loop_assignment = nullptr,
+ TypeHintAnalysis* type_hint_analysis = nullptr);
// Creates a graph by visiting the entire AST.
- bool CreateGraph();
+ bool CreateGraph(bool stack_check = true);
- protected:
- class AstContext;
- class AstEffectContext;
- class AstValueContext;
- class AstTestContext;
- class BreakableScope;
- class ContextScope;
- class Environment;
-
- Environment* environment() {
- return reinterpret_cast<Environment*>(
- StructuredGraphBuilder::environment());
+ // Helpers to create new control nodes.
+ Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
+ Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+ Node* NewMerge() { return NewNode(common()->Merge(1), true); }
+ Node* NewLoop() { return NewNode(common()->Loop(1), true); }
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
+ return NewNode(common()->Branch(hint), condition);
}
- AstContext* ast_context() const { return ast_context_; }
- BreakableScope* breakable() const { return breakable_; }
- ContextScope* execution_context() const { return execution_context_; }
-
- void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
- void set_breakable(BreakableScope* brk) { breakable_ = brk; }
- void set_execution_context(ContextScope* ctx) { execution_context_ = ctx; }
-
- // Support for control flow builders. The concrete type of the environment
- // depends on the graph builder, but environments themselves are not virtual.
- typedef StructuredGraphBuilder::Environment BaseEnvironment;
- BaseEnvironment* CopyEnvironment(BaseEnvironment* env) OVERRIDE;
-
- // Getters for values in the activation record.
- Node* GetFunctionClosure();
- Node* GetFunctionContext();
-
- //
- // The following build methods all generate graph fragments and return one
- // resulting node. The operand stack height remains the same, variables and
- // other dependencies tracked by the environment might be mutated though.
- //
-
- // Builder to create a receiver check for sloppy mode.
- Node* BuildPatchReceiverToGlobalProxy(Node* receiver);
-
- // Builder to create a local function context.
- Node* BuildLocalFunctionContext(Node* context, Node* closure);
-
- // Builder to create an arguments object if it is used.
- Node* BuildArgumentsObject(Variable* arguments);
-
- // Builders for variable load and assignment.
- Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op,
- BailoutId bailout_id,
- OutputFrameStateCombine state_combine =
- OutputFrameStateCombine::Ignore());
- Node* BuildVariableDelete(Variable* var, BailoutId bailout_id,
- OutputFrameStateCombine state_combine);
- Node* BuildVariableLoad(Variable* var, BailoutId bailout_id,
- const VectorSlotPair& feedback,
- ContextualMode mode = CONTEXTUAL);
-
- // Builders for accessing the function context.
- Node* BuildLoadBuiltinsObject();
- Node* BuildLoadGlobalObject();
- Node* BuildLoadGlobalProxy();
- Node* BuildLoadClosure();
- Node* BuildLoadObjectField(Node* object, int offset);
-
- // Builders for automatic type conversion.
- Node* BuildToBoolean(Node* value);
-
- // Builders for error reporting at runtime.
- Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
- Node* BuildThrowConstAssignError(BailoutId bailout_id);
-
- // Builders for dynamic hole-checks at runtime.
- Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
- Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole,
- BailoutId bailout_id);
-
- // Builders for binary operations.
- Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
-
- // Builder for stack-check guards.
- Node* BuildStackCheck();
-
-#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
+ protected:
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
// Visiting functions for AST nodes make this an AstVisitor.
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
// Visiting function for declarations list is overridden.
- void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
+ void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
private:
+ class AstContext;
+ class AstEffectContext;
+ class AstValueContext;
+ class AstTestContext;
+ class ContextScope;
+ class ControlScope;
+ class ControlScopeForBreakable;
+ class ControlScopeForIteration;
+ class ControlScopeForCatch;
+ class ControlScopeForFinally;
+ class Environment;
+ class FrameStateBeforeAndAfter;
+ friend class ControlBuilder;
+
+ Isolate* isolate_;
+ Zone* local_zone_;
CompilationInfo* info_;
- AstContext* ast_context_;
JSGraph* jsgraph_;
+ Environment* environment_;
+ AstContext* ast_context_;
// List of global declarations for functions and variables.
ZoneVector<Handle<Object>> globals_;
- // Stack of breakable statements entered by the visitor.
- BreakableScope* breakable_;
+ // Stack of control scopes currently entered by the visitor.
+ ControlScope* execution_control_;
// Stack of context objects pushed onto the chain by the visitor.
ContextScope* execution_context_;
@@ -139,28 +93,308 @@
// Nodes representing values in the activation record.
SetOncePointer<Node> function_closure_;
SetOncePointer<Node> function_context_;
+ SetOncePointer<Node> new_target_;
+
+ // Tracks how many try-blocks are currently entered.
+ int try_catch_nesting_level_;
+ int try_nesting_level_;
+
+ // Temporary storage for building node input lists.
+ int input_buffer_size_;
+ Node** input_buffer_;
+
+ // Optimization to cache loaded feedback vector.
+ SetOncePointer<Node> feedback_vector_;
+
+ // Control nodes that exit the function body.
+ ZoneVector<Node*> exit_controls_;
// Result of loop assignment analysis performed before graph creation.
LoopAssignmentAnalysis* loop_assignment_analysis_;
+ // Result of type hint analysis performed before graph creation.
+ TypeHintAnalysis* type_hint_analysis_;
+
+ // Cache for StateValues nodes for frame states.
+ StateValuesCache state_values_cache_;
+
+ // Analyzer of local variable liveness.
+ LivenessAnalyzer liveness_analyzer_;
+
+ // Function info for frame state construction.
+ const FrameStateFunctionInfo* const frame_state_function_info_;
+
+ // Growth increment for the temporary buffer used to construct input lists to
+ // new nodes.
+ static const int kInputBufferSizeIncrement = 64;
+
+ Zone* local_zone() const { return local_zone_; }
+ Environment* environment() const { return environment_; }
+ AstContext* ast_context() const { return ast_context_; }
+ ControlScope* execution_control() const { return execution_control_; }
+ ContextScope* execution_context() const { return execution_context_; }
+ CommonOperatorBuilder* common() const { return jsgraph_->common(); }
CompilationInfo* info() const { return info_; }
- inline StrictMode strict_mode() const;
+ Isolate* isolate() const { return isolate_; }
+ LanguageMode language_mode() const;
JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph() { return jsgraph_->graph(); }
+ Zone* graph_zone() { return graph()->zone(); }
JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
ZoneVector<Handle<Object>>* globals() { return &globals_; }
+ Scope* current_scope() const;
+ Node* current_context() const;
+ LivenessAnalyzer* liveness_analyzer() { return &liveness_analyzer_; }
+ const FrameStateFunctionInfo* frame_state_function_info() const {
+ return frame_state_function_info_;
+ }
- // Current scope during visitation.
- inline Scope* current_scope() const;
+ void set_environment(Environment* env) { environment_ = env; }
+ void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
+ void set_execution_control(ControlScope* ctrl) { execution_control_ = ctrl; }
+ void set_execution_context(ContextScope* ctx) { execution_context_ = ctx; }
+
+ // Create the main graph body by visiting the AST.
+ void CreateGraphBody(bool stack_check);
+
+ // Get or create the node that represents the incoming function closure.
+ Node* GetFunctionClosureForContext();
+ Node* GetFunctionClosure();
+
+ // Get or create the node that represents the incoming function context.
+ Node* GetFunctionContext();
+
+ // Get or create the node that represents the incoming new target value.
+ Node* GetNewTarget();
+
+ // Node creation helpers.
+ Node* NewNode(const Operator* op, bool incomplete = false) {
+ return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1) {
+ return MakeNode(op, 1, &n1, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2) {
+ Node* buffer[] = {n1, n2};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* buffer[] = {n1, n2, n3};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* buffer[] = {n1, n2, n3, n4};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5) {
+ Node* buffer[] = {n1, n2, n3, n4, n5};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+ return MakeNode(op, arraysize(nodes), nodes, false);
+ }
+
+ Node* NewNode(const Operator* op, int value_input_count, Node** value_inputs,
+ bool incomplete = false) {
+ return MakeNode(op, value_input_count, value_inputs, incomplete);
+ }
+
+ // Creates a new Phi node having {count} input values.
+ Node* NewPhi(int count, Node* input, Node* control);
+ Node* NewEffectPhi(int count, Node* input, Node* control);
+
+ // Helpers for merging control, effect or value dependencies.
+ Node* MergeControl(Node* control, Node* other);
+ Node* MergeEffect(Node* value, Node* other, Node* control);
+ Node* MergeValue(Node* value, Node* other, Node* control);
+
+ // The main node creation chokepoint. Adds context, frame state, effect,
+ // and control dependencies depending on the operator.
+ Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
+ bool incomplete);
+
+ // Helper to indicate a node exits the function body.
+ void UpdateControlDependencyToLeaveFunction(Node* exit);
+
+ // Builds deoptimization for a given node.
+ void PrepareFrameState(Node* node, BailoutId ast_id,
+ OutputFrameStateCombine framestate_combine =
+ OutputFrameStateCombine::Ignore());
+
+ BitVector* GetVariablesAssignedInLoop(IterationStatement* stmt);
+
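+ // Both field loads below are immutable and anchored at the graph start,
+ // so they neither depend on nor extend the current effect chain.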
+ // Check if the given statement is an OSR entry.
+ // If so, record the stack height in the compilation info and return {true}.
+ bool CheckOsrEntry(IterationStatement* stmt);
+
+ // Computes local variable liveness and replaces dead variables in
+ // frame states with the undefined value.
+ void ClearNonLiveSlotsInFrameStates();
+
+ Node** EnsureInputBufferSize(int size);
// Named and keyed loads require a VectorSlotPair for successful lowering.
- VectorSlotPair CreateVectorSlotPair(FeedbackVectorICSlot slot) const;
+ VectorSlotPair CreateVectorSlotPair(FeedbackVectorSlot slot) const;
+
+ // Determine which contexts need to be checked for extension objects that
+ // might shadow the optimistic declaration of dynamic lookup variables.
+ uint32_t ComputeBitsetForDynamicGlobal(Variable* variable);
+ uint32_t ComputeBitsetForDynamicContext(Variable* variable);
+
+ // ===========================================================================
+ // The following build methods all generate graph fragments and return one
+ // resulting node. The operand stack height remains the same, variables and
+ // other dependencies tracked by the environment might be mutated though.
+
+ // Builders to create local function, script and block contexts.
+ Node* BuildLocalActivationContext(Node* context);
+ Node* BuildLocalFunctionContext(Scope* scope);
+ Node* BuildLocalScriptContext(Scope* scope);
+ Node* BuildLocalBlockContext(Scope* scope);
+
+ // Builder to create an arguments object if it is used.
+ Node* BuildArgumentsObject(Variable* arguments);
+
+ // Builder to create an array of rest parameters if used.
+ Node* BuildRestArgumentsArray(Variable* rest, int index);
+
+ // Builder that assigns to the {.this_function} internal variable if needed.
+ Node* BuildThisFunctionVariable(Variable* this_function_var);
+
+ // Builder that assigns to the {new.target} internal variable if needed.
+ Node* BuildNewTargetVariable(Variable* new_target_var);
+
+ // Builders for variable load and assignment.
+ Node* BuildVariableAssignment(Variable* variable, Node* value,
+ Token::Value op, const VectorSlotPair& slot,
+ BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states,
+ OutputFrameStateCombine framestate_combine =
+ OutputFrameStateCombine::Ignore());
+ Node* BuildVariableDelete(Variable* variable, BailoutId bailout_id,
+ OutputFrameStateCombine framestate_combine);
+ Node* BuildVariableLoad(Variable* variable, BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states,
+ const VectorSlotPair& feedback,
+ OutputFrameStateCombine framestate_combine,
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+
+ // Builders for property loads and stores.
+ Node* BuildKeyedLoad(Node* receiver, Node* key,
+ const VectorSlotPair& feedback);
+ Node* BuildNamedLoad(Node* receiver, Handle<Name> name,
+ const VectorSlotPair& feedback);
+ Node* BuildKeyedStore(Node* receiver, Node* key, Node* value,
+ const VectorSlotPair& feedback);
+ Node* BuildNamedStore(Node* receiver, Handle<Name> name, Node* value,
+ const VectorSlotPair& feedback);
+
+ // Builders for super property loads and stores.
+ Node* BuildKeyedSuperStore(Node* receiver, Node* home_object, Node* key,
+ Node* value);
+ Node* BuildNamedSuperStore(Node* receiver, Node* home_object,
+ Handle<Name> name, Node* value);
+ Node* BuildNamedSuperLoad(Node* receiver, Node* home_object,
+ Handle<Name> name, const VectorSlotPair& feedback);
+ Node* BuildKeyedSuperLoad(Node* receiver, Node* home_object, Node* key,
+ const VectorSlotPair& feedback);
+
+ // Builders for global variable loads and stores.
+ Node* BuildGlobalLoad(Handle<Name> name, const VectorSlotPair& feedback,
+ TypeofMode typeof_mode);
+ Node* BuildGlobalStore(Handle<Name> name, Node* value,
+ const VectorSlotPair& feedback);
+
+ // Builders for accessing the function context.
+ Node* BuildLoadGlobalObject();
+ Node* BuildLoadNativeContextField(int index);
+ Node* BuildLoadFeedbackVector();
+
+ // Builder for accessing a (potentially immutable) object field.
+ Node* BuildLoadObjectField(Node* object, int offset);
+ Node* BuildLoadImmutableObjectField(Node* object, int offset);
+
+ // Builders for automatic type conversion.
+ Node* BuildToBoolean(Node* input, TypeFeedbackId feedback_id);
+ Node* BuildToName(Node* input, BailoutId bailout_id);
+ Node* BuildToObject(Node* input, BailoutId bailout_id);
+
+ // Builder for adding the [[HomeObject]] to a value if the value came from a
+ // function literal and needs a home object. Does nothing otherwise.
+ Node* BuildSetHomeObject(Node* value, Node* home_object,
+ ObjectLiteralProperty* property,
+ int slot_number = 0);
+
+ // Builders for error reporting at runtime.
+ Node* BuildThrowError(Node* exception, BailoutId bailout_id);
+ Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
+ Node* BuildThrowConstAssignError(BailoutId bailout_id);
+ Node* BuildThrowStaticPrototypeError(BailoutId bailout_id);
+ Node* BuildThrowUnsupportedSuperError(BailoutId bailout_id);
+
+ // Builders for dynamic hole-checks at runtime.
+ Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
+ Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole,
+ BailoutId bailout_id);
+ Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole,
+ BailoutId bailout_id);
+
+ // Builders for conditional errors.
+ Node* BuildThrowIfStaticPrototype(Node* name, BailoutId bailout_id);
+
+ // Builders for non-local control flow.
+ Node* BuildReturn(Node* return_value);
+ Node* BuildThrow(Node* exception_value);
+
+ // Builders for binary operations.
+ Node* BuildBinaryOp(Node* left, Node* right, Token::Value op,
+ TypeFeedbackId feedback_id);
// Process arguments to a call by popping {arity} elements off the operand
// stack and build a call node using the given call operator.
Node* ProcessArguments(const Operator* op, int arity);
+ // ===========================================================================
+ // The following build methods have the same contract as the above ones, but
+ // they can also return {nullptr} to indicate that no fragment was built. Note
+ // that these are optimizations; disabling any of them should still produce
+ // correct graphs.
+
+ // Optimization for variable load from global object.
+ Node* TryLoadGlobalConstant(Handle<Name> name);
+
+ // Optimization for variable load of dynamic lookup slot that is most likely
+ // to resolve to a global slot or context slot (inferred from scope chain).
+ Node* TryLoadDynamicVariable(Variable* variable, Handle<String> name,
+ BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states,
+ const VectorSlotPair& feedback,
+ OutputFrameStateCombine combine,
+ TypeofMode typeof_mode);
+
+ // Optimizations for automatic type conversion.
+ Node* TryFastToBoolean(Node* input);
+ Node* TryFastToName(Node* input);
+
+ // ===========================================================================
+ // The following visitation methods all recursively visit a subtree of the
+ // underlying AST and extend the graph. The operand stack is mutated in a way
+ // consistent with other compilers:
+ // - Expressions pop operands and push result, depending on {AstContext}.
+ // - Statements keep the operand stack balanced.
+
// Visit statements.
void VisitIfNotNull(Statement* stmt);
+ void VisitInScope(Statement* stmt, Scope* scope, Node* context);
// Visit expressions.
void Visit(Expression* expr);
@@ -168,10 +402,14 @@
void VisitForEffect(Expression* expr);
void VisitForValue(Expression* expr);
void VisitForValueOrNull(Expression* expr);
+ void VisitForValueOrTheHole(Expression* expr);
void VisitForValues(ZoneList<Expression*>* exprs);
// Common for all IterationStatement bodies.
- void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop, int);
+ void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop);
+
+ // Dispatched from VisitCall.
+ void VisitCallSuper(Call* expr);
// Dispatched from VisitCallRuntime.
void VisitCallJSRuntime(CallRuntime* expr);
@@ -188,14 +426,17 @@
void VisitArithmeticExpression(BinaryOperation* expr);
// Dispatched from VisitForInStatement.
- void VisitForInAssignment(Expression* expr, Node* value);
+ void VisitForInAssignment(Expression* expr, Node* value,
+ const VectorSlotPair& feedback,
+ BailoutId bailout_id_before,
+ BailoutId bailout_id_after);
- // Builds deoptimization for a given node.
- void PrepareFrameState(
- Node* node, BailoutId ast_id,
- OutputFrameStateCombine combine = OutputFrameStateCombine::Ignore());
+ // Dispatched from VisitObjectLiteral.
+ void VisitObjectLiteralAccessor(Node* home_object,
+ ObjectLiteralProperty* property);
- BitVector* GetVariablesAssignedInLoop(IterationStatement* stmt);
+ // Dispatched from VisitClassLiteral.
+ void VisitClassLiteralContents(ClassLiteral* expr);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
@@ -210,38 +451,33 @@
//
// [parameters (+receiver)] [locals] [operand stack]
//
-class AstGraphBuilder::Environment
- : public StructuredGraphBuilder::Environment {
+class AstGraphBuilder::Environment : public ZoneObject {
public:
Environment(AstGraphBuilder* builder, Scope* scope, Node* control_dependency);
- Environment(const Environment& copy);
int parameters_count() const { return parameters_count_; }
int locals_count() const { return locals_count_; }
+ int context_chain_length() { return static_cast<int>(contexts_.size()); }
int stack_height() {
return static_cast<int>(values()->size()) - parameters_count_ -
locals_count_;
}
- // Operations on parameter or local variables. The parameter indices are
- // shifted by 1 (receiver is parameter index -1 but environment index 0).
- void Bind(Variable* variable, Node* node) {
- DCHECK(variable->IsStackAllocated());
- if (variable->IsParameter()) {
- values()->at(variable->index() + 1) = node;
- } else {
- DCHECK(variable->IsStackLocal());
- values()->at(variable->index() + parameters_count_) = node;
- }
- }
- Node* Lookup(Variable* variable) {
- DCHECK(variable->IsStackAllocated());
- if (variable->IsParameter()) {
- return values()->at(variable->index() + 1);
- } else {
- DCHECK(variable->IsStackLocal());
- return values()->at(variable->index() + parameters_count_);
- }
+ // Operations on parameter or local variables.
+ void Bind(Variable* variable, Node* node);
+ Node* Lookup(Variable* variable);
+ void MarkAllLocalsLive();
+
+ // Raw operations on parameter variables.
+ void RawParameterBind(int index, Node* node);
+ Node* RawParameterLookup(int index);
+
+ // Operations on the context chain.
+ Node* Context() const { return contexts_.back(); }
+ void PushContext(Node* context) { contexts()->push_back(context); }
+ void PopContext() { contexts()->pop_back(); }
+ void TrimContextChain(int trim_to_length) {
+ contexts()->resize(trim_to_length);
}
// Operations on the operand stack.
@@ -274,176 +510,82 @@
DCHECK(depth >= 0 && depth <= stack_height());
values()->erase(values()->end() - depth, values()->end());
}
+ void TrimStack(int trim_to_height) {
+ int depth = stack_height() - trim_to_height;
+ DCHECK(depth >= 0 && depth <= stack_height());
+ values()->erase(values()->end() - depth, values()->end());
+ }
// Preserve a checkpoint of the environment for the IR graph. Any
// further mutation of the environment will not affect checkpoints.
- Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine);
+ Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine =
+ OutputFrameStateCombine::Ignore());
- protected:
- AstGraphBuilder* builder() const {
- return reinterpret_cast<AstGraphBuilder*>(
- StructuredGraphBuilder::Environment::builder());
+ // Control dependency tracked by this environment.
+ Node* GetControlDependency() { return control_dependency_; }
+ void UpdateControlDependency(Node* dependency) {
+ control_dependency_ = dependency;
}
- private:
- void UpdateStateValues(Node** state_values, int offset, int count);
+ // Effect dependency tracked by this environment.
+ Node* GetEffectDependency() { return effect_dependency_; }
+ void UpdateEffectDependency(Node* dependency) {
+ effect_dependency_ = dependency;
+ }
+ // Mark this environment as being unreachable.
+ void MarkAsUnreachable() {
+ UpdateControlDependency(builder()->jsgraph()->Dead());
+ liveness_block_ = nullptr;
+ }
+ bool IsMarkedAsUnreachable() {
+ return GetControlDependency()->opcode() == IrOpcode::kDead;
+ }
+
+ // Merge another environment into this one.
+ void Merge(Environment* other);
+
+ // Copies this environment at a control-flow split point.
+ Environment* CopyForConditional();
+
+ // Copies this environment to a potentially unreachable control-flow point.
+ Environment* CopyAsUnreachable();
+
+ // Copies this environment at a loop header control-flow point.
+ Environment* CopyForLoop(BitVector* assigned, bool is_osr = false);
+
+ private:
+ AstGraphBuilder* builder_;
int parameters_count_;
int locals_count_;
+ LivenessAnalyzerBlock* liveness_block_;
+ NodeVector values_;
+ NodeVector contexts_;
+ Node* control_dependency_;
+ Node* effect_dependency_;
Node* parameters_node_;
Node* locals_node_;
Node* stack_node_;
+
+ explicit Environment(Environment* copy,
+ LivenessAnalyzerBlock* liveness_block);
+ Environment* CopyAndShareLiveness();
+ void UpdateStateValues(Node** state_values, int offset, int count);
+ void UpdateStateValuesWithCache(Node** state_values, int offset, int count);
+ Zone* zone() const { return builder_->local_zone(); }
+ Graph* graph() const { return builder_->graph(); }
+ AstGraphBuilder* builder() const { return builder_; }
+ CommonOperatorBuilder* common() { return builder_->common(); }
+ NodeVector* values() { return &values_; }
+ NodeVector* contexts() { return &contexts_; }
+ LivenessAnalyzerBlock* liveness_block() { return liveness_block_; }
+ bool IsLivenessAnalysisEnabled();
+ bool IsLivenessBlockConsistent();
+
+ // Prepare environment to be used as loop header.
+ void PrepareForLoop(BitVector* assigned, bool is_osr = false);
};
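+
+// Example (an illustrative sketch, not code from this patch): lowering an
+// if-statement typically copies the environment at the split point and
+// merges the two branch environments afterwards:
+//
+//   Environment* else_env = environment()->CopyForConditional();
+//   // ... build the then-branch in the current environment ...
+//   // ... build the else-branch in {else_env} ...
+//   environment()->Merge(else_env);  // introduces Phis for differing values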
-
-// Each expression in the AST is evaluated in a specific context. This context
-// decides how the evaluation result is passed up the visitor.
-class AstGraphBuilder::AstContext BASE_EMBEDDED {
- public:
- bool IsEffect() const { return kind_ == Expression::kEffect; }
- bool IsValue() const { return kind_ == Expression::kValue; }
- bool IsTest() const { return kind_ == Expression::kTest; }
-
- // Determines how to combine the frame state with the value
- // that is about to be plugged into this AstContext.
- OutputFrameStateCombine GetStateCombine() {
- return IsEffect() ? OutputFrameStateCombine::Ignore()
- : OutputFrameStateCombine::Push();
- }
-
- // Plug a node into this expression context. Call this function in tail
- // position in the Visit functions for expressions.
- virtual void ProduceValue(Node* value) = 0;
-
- // Unplugs a node from this expression context. Call this to retrieve the
- // result of another Visit function that already plugged the context.
- virtual Node* ConsumeValue() = 0;
-
- // Shortcut for "context->ProduceValue(context->ConsumeValue())".
- void ReplaceValue() { ProduceValue(ConsumeValue()); }
-
- protected:
- AstContext(AstGraphBuilder* owner, Expression::Context kind);
- virtual ~AstContext();
-
- AstGraphBuilder* owner() const { return owner_; }
- Environment* environment() const { return owner_->environment(); }
-
-// We want to be able to assert, in a context-specific way, that the stack
-// height makes sense when the context is filled.
-#ifdef DEBUG
- int original_height_;
-#endif
-
- private:
- Expression::Context kind_;
- AstGraphBuilder* owner_;
- AstContext* outer_;
-};
-
-
-// Context to evaluate expression for its side effects only.
-class AstGraphBuilder::AstEffectContext FINAL : public AstContext {
- public:
- explicit AstEffectContext(AstGraphBuilder* owner)
- : AstContext(owner, Expression::kEffect) {}
- ~AstEffectContext() FINAL;
- void ProduceValue(Node* value) FINAL;
- Node* ConsumeValue() FINAL;
-};
-
-
-// Context to evaluate expression for its value (and side effects).
-class AstGraphBuilder::AstValueContext FINAL : public AstContext {
- public:
- explicit AstValueContext(AstGraphBuilder* owner)
- : AstContext(owner, Expression::kValue) {}
- ~AstValueContext() FINAL;
- void ProduceValue(Node* value) FINAL;
- Node* ConsumeValue() FINAL;
-};
-
-
-// Context to evaluate expression for a condition value (and side effects).
-class AstGraphBuilder::AstTestContext FINAL : public AstContext {
- public:
- explicit AstTestContext(AstGraphBuilder* owner)
- : AstContext(owner, Expression::kTest) {}
- ~AstTestContext() FINAL;
- void ProduceValue(Node* value) FINAL;
- Node* ConsumeValue() FINAL;
-};
-
-
-// Scoped class tracking breakable statements entered by the visitor. Allows to
-// properly 'break' and 'continue' iteration statements as well as to 'break'
-// from blocks within switch statements.
-class AstGraphBuilder::BreakableScope BASE_EMBEDDED {
- public:
- BreakableScope(AstGraphBuilder* owner, BreakableStatement* target,
- ControlBuilder* control, int drop_extra)
- : owner_(owner),
- target_(target),
- next_(owner->breakable()),
- control_(control),
- drop_extra_(drop_extra) {
- owner_->set_breakable(this); // Push.
- }
-
- ~BreakableScope() {
- owner_->set_breakable(next_); // Pop.
- }
-
- // Either 'break' or 'continue' the target statement.
- void BreakTarget(BreakableStatement* target);
- void ContinueTarget(BreakableStatement* target);
-
- private:
- AstGraphBuilder* owner_;
- BreakableStatement* target_;
- BreakableScope* next_;
- ControlBuilder* control_;
- int drop_extra_;
-
- // Find the correct scope for the target statement. Note that this also drops
- // extra operands from the environment for each scope skipped along the way.
- BreakableScope* FindBreakable(BreakableStatement* target);
-};
-
-
-// Scoped class tracking context objects created by the visitor. Represents
-// mutations of the context chain within the function body and allows to
-// change the current {scope} and {context} during visitation.
-class AstGraphBuilder::ContextScope BASE_EMBEDDED {
- public:
- ContextScope(AstGraphBuilder* owner, Scope* scope, Node* context)
- : owner_(owner),
- next_(owner->execution_context()),
- outer_(owner->current_context()),
- scope_(scope) {
- owner_->set_execution_context(this); // Push.
- owner_->set_current_context(context);
- }
-
- ~ContextScope() {
- owner_->set_execution_context(next_); // Pop.
- owner_->set_current_context(outer_);
- }
-
- // Current scope during visitation.
- Scope* scope() const { return scope_; }
-
- private:
- AstGraphBuilder* owner_;
- ContextScope* next_;
- Node* outer_;
- Scope* scope_;
-};
-
-Scope* AstGraphBuilder::current_scope() const {
- return execution_context_->scope();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/ast-loop-assignment-analyzer.cc b/src/compiler/ast-loop-assignment-analyzer.cc
index 7adac56..2074c94 100644
--- a/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/src/compiler/ast-loop-assignment-analyzer.cc
@@ -3,7 +3,8 @@
// found in the LICENSE file.
#include "src/compiler/ast-loop-assignment-analyzer.h"
-#include "src/parser.h"
+#include "src/compiler.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -12,16 +13,16 @@
typedef class AstLoopAssignmentAnalyzer ALAA; // for brevity.
ALAA::AstLoopAssignmentAnalyzer(Zone* zone, CompilationInfo* info)
- : info_(info), loop_stack_(zone) {
- InitializeAstVisitor(zone);
+ : info_(info), zone_(zone), loop_stack_(zone) {
+ InitializeAstVisitor(info->isolate());
}
LoopAssignmentAnalysis* ALAA::Analyze() {
- LoopAssignmentAnalysis* a = new (zone()) LoopAssignmentAnalysis(zone());
+ LoopAssignmentAnalysis* a = new (zone_) LoopAssignmentAnalysis(zone_);
result_ = a;
- VisitStatements(info()->function()->body());
- result_ = NULL;
+ VisitStatements(info()->literal()->body());
+ result_ = nullptr;
return a;
}
@@ -29,7 +30,9 @@
void ALAA::Enter(IterationStatement* loop) {
int num_variables = 1 + info()->scope()->num_parameters() +
info()->scope()->num_stack_slots();
- BitVector* bits = new (zone()) BitVector(num_variables, zone());
+ BitVector* bits = new (zone_) BitVector(num_variables, zone_);
+ if (info()->is_osr() && info()->osr_ast_id() == loop->OsrEntryId())
+ bits->AddAll();
loop_stack_.push_back(bits);
}
@@ -52,12 +55,8 @@
void ALAA::VisitVariableDeclaration(VariableDeclaration* leaf) {}
void ALAA::VisitFunctionDeclaration(FunctionDeclaration* leaf) {}
-void ALAA::VisitModuleDeclaration(ModuleDeclaration* leaf) {}
void ALAA::VisitImportDeclaration(ImportDeclaration* leaf) {}
void ALAA::VisitExportDeclaration(ExportDeclaration* leaf) {}
-void ALAA::VisitModuleVariable(ModuleVariable* leaf) {}
-void ALAA::VisitModulePath(ModulePath* leaf) {}
-void ALAA::VisitModuleUrl(ModuleUrl* leaf) {}
void ALAA::VisitEmptyStatement(EmptyStatement* leaf) {}
void ALAA::VisitContinueStatement(ContinueStatement* leaf) {}
void ALAA::VisitBreakStatement(BreakStatement* leaf) {}
@@ -68,18 +67,22 @@
void ALAA::VisitLiteral(Literal* leaf) {}
void ALAA::VisitRegExpLiteral(RegExpLiteral* leaf) {}
void ALAA::VisitThisFunction(ThisFunction* leaf) {}
-void ALAA::VisitSuperReference(SuperReference* leaf) {}
+void ALAA::VisitSuperPropertyReference(SuperPropertyReference* leaf) {}
+void ALAA::VisitSuperCallReference(SuperCallReference* leaf) {}
// ---------------------------------------------------------------------------
// -- Pass-through nodes------------------------------------------------------
// ---------------------------------------------------------------------------
-void ALAA::VisitModuleLiteral(ModuleLiteral* e) { Visit(e->body()); }
-
-
void ALAA::VisitBlock(Block* stmt) { VisitStatements(stmt->statements()); }
+void ALAA::VisitDoExpression(DoExpression* expr) {
+ Visit(expr->block());
+ Visit(expr->result());
+}
+
+
void ALAA::VisitExpressionStatement(ExpressionStatement* stmt) {
Visit(stmt->expression());
}
@@ -123,6 +126,7 @@
VisitIfNotNull(e->constructor());
ZoneList<ObjectLiteralProperty*>* properties = e->properties();
for (int i = 0; i < properties->length(); i++) {
+ Visit(properties->at(i)->key());
Visit(properties->at(i)->value());
}
}
@@ -138,6 +142,7 @@
void ALAA::VisitObjectLiteral(ObjectLiteral* e) {
ZoneList<ObjectLiteralProperty*>* properties = e->properties();
for (int i = 0; i < properties->length(); i++) {
+ Visit(properties->at(i)->key());
Visit(properties->at(i)->value());
}
}
@@ -193,22 +198,27 @@
}
+void ALAA::VisitSpread(Spread* e) { Visit(e->expression()); }
+
+
+void ALAA::VisitEmptyParentheses(EmptyParentheses* e) { UNREACHABLE(); }
+
+
void ALAA::VisitCaseClause(CaseClause* cc) {
if (!cc->is_default()) Visit(cc->label());
VisitStatements(cc->statements());
}
-// ---------------------------------------------------------------------------
-// -- Interesting nodes-------------------------------------------------------
-// ---------------------------------------------------------------------------
-void ALAA::VisitModuleStatement(ModuleStatement* stmt) {
- Visit(stmt->body());
- // TODO(turbofan): can a module appear in a loop?
- AnalyzeAssignment(stmt->proxy()->var());
+void ALAA::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* stmt) {
+ Visit(stmt->statement());
}
+// ---------------------------------------------------------------------------
+// -- Interesting nodes-------------------------------------------------------
+// ---------------------------------------------------------------------------
void ALAA::VisitTryCatchStatement(TryCatchStatement* stmt) {
Visit(stmt->try_block());
Visit(stmt->catch_block());
@@ -253,7 +263,9 @@
void ALAA::VisitForOfStatement(ForOfStatement* loop) {
+ Visit(loop->assign_iterator());
Enter(loop);
+ Visit(loop->assign_each());
Visit(loop->each());
Visit(loop->subject());
Visit(loop->body());
@@ -276,6 +288,12 @@
}
+void ALAA::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ Visit(expr->expression());
+}
+
+
void ALAA::AnalyzeAssignment(Variable* var) {
if (!loop_stack_.empty() && var->IsStackAllocated()) {
loop_stack_.back()->Add(GetVariableIndex(info()->scope(), var));
@@ -300,6 +318,6 @@
}
return count;
}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/ast-loop-assignment-analyzer.h b/src/compiler/ast-loop-assignment-analyzer.h
index 00a7f2d..1696911 100644
--- a/src/compiler/ast-loop-assignment-analyzer.h
+++ b/src/compiler/ast-loop-assignment-analyzer.h
@@ -5,9 +5,8 @@
#ifndef V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
#define V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/bit-vector.h"
-#include "src/v8.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -27,7 +26,7 @@
if (list_[i].first == loop) return list_[i].second;
}
UNREACHABLE(); // should never ask for loops that aren't here!
- return NULL;
+ return nullptr;
}
int GetAssignmentCountForTesting(Scope* scope, Variable* var);
@@ -46,7 +45,7 @@
LoopAssignmentAnalysis* Analyze();
-#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
@@ -54,6 +53,7 @@
private:
CompilationInfo* info_;
+ Zone* zone_;
ZoneDeque<BitVector*> loop_stack_;
LoopAssignmentAnalysis* result_;
@@ -63,7 +63,7 @@
void Exit(IterationStatement* loop);
void VisitIfNotNull(AstNode* node) {
- if (node != NULL) Visit(node);
+ if (node != nullptr) Visit(node);
}
void AnalyzeAssignment(Variable* var);
@@ -71,8 +71,8 @@
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstLoopAssignmentAnalyzer);
};
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
diff --git a/src/compiler/basic-block-instrumentor.cc b/src/compiler/basic-block-instrumentor.cc
index d7d3ade..a966a5b 100644
--- a/src/compiler/basic-block-instrumentor.cc
+++ b/src/compiler/basic-block-instrumentor.cc
@@ -10,6 +10,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/schedule.h"
@@ -54,8 +55,7 @@
BasicBlockProfiler::Data* data =
info->isolate()->GetOrCreateBasicBlockProfiler()->NewData(n_blocks);
// Set the function name.
- if (!info->shared_info().is_null() &&
- info->shared_info()->name()->IsString()) {
+ if (info->has_shared_info() && info->shared_info()->name()->IsString()) {
std::ostringstream os;
String::cast(info->shared_info()->name())->PrintUC16(os);
data->SetFunctionName(&os);
@@ -81,11 +81,13 @@
// Construct increment operation.
Node* base = graph->NewNode(
PointerConstant(&common, data->GetCounterAddress(block_number)));
- Node* load = graph->NewNode(machine.Load(kMachUint32), base, zero);
+ Node* load = graph->NewNode(machine.Load(MachineType::Uint32()), base, zero,
+ graph->start(), graph->start());
Node* inc = graph->NewNode(machine.Int32Add(), load, one);
- Node* store = graph->NewNode(
- machine.Store(StoreRepresentation(kMachUint32, kNoWriteBarrier)), base,
- zero, inc);
+ Node* store =
+ graph->NewNode(machine.Store(StoreRepresentation(
+ MachineRepresentation::kWord32, kNoWriteBarrier)),
+ base, zero, inc, graph->start(), graph->start());
// Insert the new nodes.
static const int kArraySize = 6;
Node* to_insert[kArraySize] = {zero, one, base, load, inc, store};
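+  // Taken together, the inserted nodes increment the per-block counter,
+  // i.e. in effect (illustrative): *counter_address = *counter_address + 1.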
diff --git a/src/compiler/basic-block-instrumentor.h b/src/compiler/basic-block-instrumentor.h
index 7edac0d..32dd82a 100644
--- a/src/compiler/basic-block-instrumentor.h
+++ b/src/compiler/basic-block-instrumentor.h
@@ -5,8 +5,7 @@
#ifndef V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
#define V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
-#include "src/v8.h"
-
+#include "src/allocation.h"
#include "src/basic-block-profiler.h"
namespace v8 {
diff --git a/src/compiler/branch-elimination.cc b/src/compiler/branch-elimination.cc
new file mode 100644
index 0000000..bc56e73
--- /dev/null
+++ b/src/compiler/branch-elimination.cc
@@ -0,0 +1,269 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/branch-elimination.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
+ Zone* zone)
+ : AdvancedReducer(editor),
+ node_conditions_(zone, js_graph->graph()->NodeCount()),
+ zone_(zone),
+ dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {}
+
+
+BranchElimination::~BranchElimination() {}
+
+
+Reduction BranchElimination::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kDead:
+ return NoChange();
+ case IrOpcode::kMerge:
+ return ReduceMerge(node);
+ case IrOpcode::kLoop:
+ return ReduceLoop(node);
+ case IrOpcode::kBranch:
+ return ReduceBranch(node);
+ case IrOpcode::kIfFalse:
+ return ReduceIf(node, false);
+ case IrOpcode::kIfTrue:
+ return ReduceIf(node, true);
+ case IrOpcode::kStart:
+ return ReduceStart(node);
+ default:
+ if (node->op()->ControlOutputCount() > 0) {
+ return ReduceOtherControl(node);
+ }
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction BranchElimination::ReduceBranch(Node* node) {
+ Node* condition = node->InputAt(0);
+ Node* control_input = NodeProperties::GetControlInput(node, 0);
+ const ControlPathConditions* from_input = node_conditions_.Get(control_input);
+ if (from_input != nullptr) {
+ Maybe<bool> condition_value = from_input->LookupCondition(condition);
+ // If we know the condition we can discard the branch.
+ if (condition_value.IsJust()) {
+ bool known_value = condition_value.FromJust();
+ for (Node* const use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kIfTrue:
+ Replace(use, known_value ? control_input : dead());
+ break;
+ case IrOpcode::kIfFalse:
+ Replace(use, known_value ? dead() : control_input);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return Replace(dead());
+ }
+ }
+ return TakeConditionsFromFirstControl(node);
+}
+
+
+Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
+ // Add the condition to the list arriving from the input branch.
+ Node* branch = NodeProperties::GetControlInput(node, 0);
+ const ControlPathConditions* from_branch = node_conditions_.Get(branch);
+ // If we do not know anything about the predecessor, do not propagate just
+ // yet because we will have to recompute anyway once we compute the
+ // predecessor.
+ if (from_branch == nullptr) {
+ DCHECK(node_conditions_.Get(node) == nullptr);
+ return NoChange();
+ }
+ Node* condition = branch->InputAt(0);
+ return UpdateConditions(
+ node, from_branch->AddCondition(zone_, condition, is_true_branch));
+}
+
+
+Reduction BranchElimination::ReduceLoop(Node* node) {
+ // Here we rely on having only reducible loops:
+ // The loop entry edge always dominates the header, so we can just use
+ // the information from the loop entry edge.
+ return TakeConditionsFromFirstControl(node);
+}
+
+
+Reduction BranchElimination::ReduceMerge(Node* node) {
+ // Shortcut for the case when we do not know anything about some
+ // input.
+ for (int i = 0; i < node->InputCount(); i++) {
+ if (node_conditions_.Get(node->InputAt(i)) == nullptr) {
+ DCHECK(node_conditions_.Get(node) == nullptr);
+ return NoChange();
+ }
+ }
+
+ const ControlPathConditions* first = node_conditions_.Get(node->InputAt(0));
+ // Make a copy of the first input's conditions and merge with the conditions
+ // from other inputs.
+ ControlPathConditions* conditions =
+ new (zone_->New(sizeof(ControlPathConditions)))
+ ControlPathConditions(*first);
+ for (int i = 1; i < node->InputCount(); i++) {
+ conditions->Merge(*(node_conditions_.Get(node->InputAt(i))));
+ }
+
+ return UpdateConditions(node, conditions);
+}
+
+
+Reduction BranchElimination::ReduceStart(Node* node) {
+ return UpdateConditions(node, ControlPathConditions::Empty(zone_));
+}
+
+
+const BranchElimination::ControlPathConditions*
+BranchElimination::PathConditionsForControlNodes::Get(Node* node) {
+ if (static_cast<size_t>(node->id()) < info_for_node_.size()) {
+ return info_for_node_[node->id()];
+ }
+ return nullptr;
+}
+
+
+void BranchElimination::PathConditionsForControlNodes::Set(
+ Node* node, const ControlPathConditions* conditions) {
+ size_t index = static_cast<size_t>(node->id());
+ if (index >= info_for_node_.size()) {
+ info_for_node_.resize(index + 1, nullptr);
+ }
+ info_for_node_[index] = conditions;
+}
+
+
+Reduction BranchElimination::ReduceOtherControl(Node* node) {
+ DCHECK_EQ(1, node->op()->ControlInputCount());
+ return TakeConditionsFromFirstControl(node);
+}
+
+
+Reduction BranchElimination::TakeConditionsFromFirstControl(Node* node) {
+ // We just propagate the information from the control input (ideally,
+  // we would only revisit control uses if there is a change).
+ const ControlPathConditions* from_input =
+ node_conditions_.Get(NodeProperties::GetControlInput(node, 0));
+ return UpdateConditions(node, from_input);
+}
+
+
+Reduction BranchElimination::UpdateConditions(
+ Node* node, const ControlPathConditions* conditions) {
+ const ControlPathConditions* original = node_conditions_.Get(node);
+ // Only signal that the node has Changed if the condition information has
+ // changed.
+ if (conditions != original) {
+ if (original == nullptr || *conditions != *original) {
+ node_conditions_.Set(node, conditions);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+
+// static
+const BranchElimination::ControlPathConditions*
+BranchElimination::ControlPathConditions::Empty(Zone* zone) {
+ return new (zone->New(sizeof(ControlPathConditions)))
+ ControlPathConditions(nullptr, 0);
+}
+
+
+void BranchElimination::ControlPathConditions::Merge(
+ const ControlPathConditions& other) {
+ // Change the current condition list to a longest common tail
+ // of this condition list and the other list. (The common tail
+ // should correspond to the list from the common dominator.)
+
+ // First, we throw away the prefix of the longer list, so that
+ // we have lists of the same length.
+ size_t other_size = other.condition_count_;
+ BranchCondition* other_condition = other.head_;
+ while (other_size > condition_count_) {
+ other_condition = other_condition->next;
+ other_size--;
+ }
+ while (condition_count_ > other_size) {
+ head_ = head_->next;
+ condition_count_--;
+ }
+
+ // Then we go through both lists in lock-step until we find
+ // the common tail.
+ while (head_ != other_condition) {
+ DCHECK(condition_count_ > 0);
+ condition_count_--;
+ other_condition = other_condition->next;
+ head_ = head_->next;
+ }
+}
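+
+
+// Example (illustrative): two control paths that diverged after recording
+// the conditions [c2, c1] might carry the lists [c3, c2, c1] and
+// [c4, c2, c1]. Merge() first trims both lists to equal length and then
+// walks them in lock-step until head_ points at the shared tail [c2, c1],
+// which is exactly the list at the common dominator.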
+
+
+const BranchElimination::ControlPathConditions*
+BranchElimination::ControlPathConditions::AddCondition(Zone* zone,
+ Node* condition,
+ bool is_true) const {
+ DCHECK(LookupCondition(condition).IsNothing());
+
+ BranchCondition* new_head = new (zone->New(sizeof(BranchCondition)))
+ BranchCondition(condition, is_true, head_);
+
+ ControlPathConditions* conditions =
+ new (zone->New(sizeof(ControlPathConditions)))
+ ControlPathConditions(new_head, condition_count_ + 1);
+ return conditions;
+}
+
+
+Maybe<bool> BranchElimination::ControlPathConditions::LookupCondition(
+ Node* condition) const {
+ for (BranchCondition* current = head_; current != nullptr;
+ current = current->next) {
+ if (current->condition == condition) {
+ return Just<bool>(current->is_true);
+ }
+ }
+ return Nothing<bool>();
+}
+
+
+bool BranchElimination::ControlPathConditions::operator==(
+ const ControlPathConditions& other) const {
+ if (condition_count_ != other.condition_count_) return false;
+ BranchCondition* this_condition = head_;
+ BranchCondition* other_condition = other.head_;
+ while (true) {
+ if (this_condition == other_condition) return true;
+ if (this_condition->condition != other_condition->condition ||
+ this_condition->is_true != other_condition->is_true) {
+ return false;
+ }
+ this_condition = this_condition->next;
+ other_condition = other_condition->next;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/branch-elimination.h b/src/compiler/branch-elimination.h
new file mode 100644
index 0000000..a7ac926
--- /dev/null
+++ b/src/compiler/branch-elimination.h
@@ -0,0 +1,97 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
+#define V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSGraph;
+
+
+class BranchElimination final : public AdvancedReducer {
+ public:
+ BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone);
+ ~BranchElimination() final;
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ struct BranchCondition {
+ Node* condition;
+ bool is_true;
+ BranchCondition* next;
+
+ BranchCondition(Node* condition, bool is_true, BranchCondition* next)
+ : condition(condition), is_true(is_true), next(next) {}
+ };
+
+ // Class for tracking information about branch conditions.
+ // At the moment it is a linked list of conditions and their values
+ // (true or false).
+ class ControlPathConditions {
+ public:
+ Maybe<bool> LookupCondition(Node* condition) const;
+
+ const ControlPathConditions* AddCondition(Zone* zone, Node* condition,
+ bool is_true) const;
+ static const ControlPathConditions* Empty(Zone* zone);
+ void Merge(const ControlPathConditions& other);
+
+ bool operator==(const ControlPathConditions& other) const;
+ bool operator!=(const ControlPathConditions& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ ControlPathConditions(BranchCondition* head, size_t condition_count)
+ : head_(head), condition_count_(condition_count) {}
+
+ BranchCondition* head_;
+ // We keep track of the list length so that we can find the longest
+ // common tail easily.
+ size_t condition_count_;
+ };
+
+ // Maps each control node to the condition information known about the node.
+ // If the information is nullptr, then we have not calculated the information
+ // yet.
+ class PathConditionsForControlNodes {
+ public:
+ PathConditionsForControlNodes(Zone* zone, size_t size_hint)
+ : info_for_node_(size_hint, nullptr, zone) {}
+ const ControlPathConditions* Get(Node* node);
+ void Set(Node* node, const ControlPathConditions* conditions);
+
+ private:
+ ZoneVector<const ControlPathConditions*> info_for_node_;
+ };
+
+ Reduction ReduceBranch(Node* node);
+ Reduction ReduceIf(Node* node, bool is_true_branch);
+ Reduction ReduceLoop(Node* node);
+ Reduction ReduceMerge(Node* node);
+ Reduction ReduceStart(Node* node);
+ Reduction ReduceOtherControl(Node* node);
+
+ Reduction TakeConditionsFromFirstControl(Node* node);
+ Reduction UpdateConditions(Node* node,
+ const ControlPathConditions* conditions);
+
+ Node* dead() const { return dead_; }
+
+ PathConditionsForControlNodes node_conditions_;
+ Zone* zone_;
+ Node* dead_;
+};
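+
+// Example (illustrative): in a graph built from code like
+//
+//   if (x) { if (x) { f(); } }
+//
+// the inner branch re-tests a condition that is known to be true on its
+// control path, so ReduceBranch wires the inner IfTrue use to the branch's
+// control input and replaces the inner IfFalse with Dead.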
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
diff --git a/src/compiler/bytecode-branch-analysis.cc b/src/compiler/bytecode-branch-analysis.cc
new file mode 100644
index 0000000..27699a1
--- /dev/null
+++ b/src/compiler/bytecode-branch-analysis.cc
@@ -0,0 +1,125 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-branch-analysis.h"
+
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// This class records all of the sites that contain
+// branches to a particular target (bytecode offset).
+class BytecodeBranchInfo final : public ZoneObject {
+ public:
+ explicit BytecodeBranchInfo(Zone* zone)
+ : back_edge_offsets_(zone), fore_edge_offsets_(zone) {}
+
+ void AddBranch(int source_offset, int target_offset);
+
+ // The offsets of bytecodes that refer to this bytecode as
+ // a back-edge predecessor.
+ const ZoneVector<int>* back_edge_offsets() { return &back_edge_offsets_; }
+
+ // The offsets of bytecodes that refer to this bytecode as
+  // a forward-edge predecessor.
+ const ZoneVector<int>* fore_edge_offsets() { return &fore_edge_offsets_; }
+
+ private:
+ ZoneVector<int> back_edge_offsets_;
+ ZoneVector<int> fore_edge_offsets_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeBranchInfo);
+};
+
+
+void BytecodeBranchInfo::AddBranch(int source_offset, int target_offset) {
+ if (source_offset < target_offset) {
+ fore_edge_offsets_.push_back(source_offset);
+ } else {
+ back_edge_offsets_.push_back(source_offset);
+ }
+}
+
+
+BytecodeBranchAnalysis::BytecodeBranchAnalysis(
+ Handle<BytecodeArray> bytecode_array, Zone* zone)
+ : branch_infos_(zone),
+ bytecode_array_(bytecode_array),
+ reachable_(bytecode_array->length(), zone),
+ zone_(zone) {}
+
+
+void BytecodeBranchAnalysis::Analyze() {
+ interpreter::BytecodeArrayIterator iterator(bytecode_array());
+ bool reachable = true;
+ while (!iterator.done()) {
+ interpreter::Bytecode bytecode = iterator.current_bytecode();
+ int current_offset = iterator.current_offset();
+ // All bytecode basic blocks are generated to be forward reachable
+    // and may also be backward reachable. Hence, if there's a forward
+    // branch targeting here, the code becomes reachable.
+ reachable = reachable || forward_branches_target(current_offset);
+ if (reachable) {
+ reachable_.Add(current_offset);
+ if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
+ // Only the branch is recorded, the forward path falls through
+ // and is handled as normal bytecode data flow.
+ AddBranch(current_offset, iterator.GetJumpTargetOffset());
+ } else if (interpreter::Bytecodes::IsJump(bytecode)) {
+ // Unless the branch targets the next bytecode it's not
+ // reachable. If it targets the next bytecode the check at the
+ // start of the loop will set the reachable flag.
+ AddBranch(current_offset, iterator.GetJumpTargetOffset());
+ reachable = false;
+ } else if (interpreter::Bytecodes::IsJumpOrReturn(bytecode)) {
+ DCHECK_EQ(bytecode, interpreter::Bytecode::kReturn);
+ reachable = false;
+ }
+ }
+ iterator.Advance();
+ }
+}
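+
+
+// Example (an illustrative sketch; the offsets are made up): in a sequence
+// like
+//   0: Jump @8
+//   3: LdaZero
+//   8: Return
+// the jump records a forward branch to offset 8, the fall-through bytecode
+// at offset 3 is left unreachable, and offset 8 is marked reachable again
+// because a forward branch targets it.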
+
+
+const ZoneVector<int>* BytecodeBranchAnalysis::BackwardBranchesTargetting(
+ int offset) const {
+ auto iterator = branch_infos_.find(offset);
+ if (branch_infos_.end() != iterator) {
+ return iterator->second->back_edge_offsets();
+ } else {
+ return nullptr;
+ }
+}
+
+
+const ZoneVector<int>* BytecodeBranchAnalysis::ForwardBranchesTargetting(
+ int offset) const {
+ auto iterator = branch_infos_.find(offset);
+ if (branch_infos_.end() != iterator) {
+ return iterator->second->fore_edge_offsets();
+ } else {
+ return nullptr;
+ }
+}
+
+
+void BytecodeBranchAnalysis::AddBranch(int source_offset, int target_offset) {
+ BytecodeBranchInfo* branch_info = nullptr;
+ auto iterator = branch_infos_.find(target_offset);
+ if (branch_infos_.end() == iterator) {
+ branch_info = new (zone()) BytecodeBranchInfo(zone());
+ branch_infos_.insert(std::make_pair(target_offset, branch_info));
+ } else {
+ branch_info = iterator->second;
+ }
+ branch_info->AddBranch(source_offset, target_offset);
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/bytecode-branch-analysis.h b/src/compiler/bytecode-branch-analysis.h
new file mode 100644
index 0000000..0ef33b6
--- /dev/null
+++ b/src/compiler/bytecode-branch-analysis.h
@@ -0,0 +1,79 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
+#define V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
+
+#include "src/bit-vector.h"
+#include "src/handles.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+
+namespace compiler {
+
+class BytecodeBranchInfo;
+
+// A class for identifying the branch targets and their branch sites
+// within a bytecode array and also identifying which bytecodes are
+// reachable. This information can be used to construct the local
+// control flow logic for high-level IR graphs built from bytecode.
+//
+// NB: This class relies on backward branches in bytecode occurring only
+// as jumps back to loop headers.
+class BytecodeBranchAnalysis BASE_EMBEDDED {
+ public:
+ BytecodeBranchAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone);
+
+ // Analyze the bytecodes to find the branch sites and their
+ // targets. No other methods in this class return valid information
+ // until this has been called.
+ void Analyze();
+
+ // Offsets of bytecodes having a backward branch to the bytecode at |offset|.
+ const ZoneVector<int>* BackwardBranchesTargetting(int offset) const;
+
+ // Offsets of bytecodes having a forward branch to the bytecode at |offset|.
+ const ZoneVector<int>* ForwardBranchesTargetting(int offset) const;
+
+ // Returns true if the bytecode at |offset| is reachable.
+ bool is_reachable(int offset) const { return reachable_.Contains(offset); }
+
+ // Returns true if there are any forward branches to the bytecode at
+ // |offset|.
+ bool forward_branches_target(int offset) const {
+ const ZoneVector<int>* sites = ForwardBranchesTargetting(offset);
+ return sites != nullptr && sites->size() > 0;
+ }
+
+ // Returns true if there are any backward branches to the bytecode
+ // at |offset|.
+ bool backward_branches_target(int offset) const {
+ const ZoneVector<int>* sites = BackwardBranchesTargetting(offset);
+ return sites != nullptr && sites->size() > 0;
+ }
+
+ private:
+ void AddBranch(int origin_offset, int target_offset);
+
+ Zone* zone() const { return zone_; }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
+ ZoneMap<int, BytecodeBranchInfo*> branch_infos_;
+ Handle<BytecodeArray> bytecode_array_;
+ BitVector reachable_;
+ Zone* zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeBranchAnalysis);
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
new file mode 100644
index 0000000..cf0b6ab
--- /dev/null
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -0,0 +1,2040 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-graph-builder.h"
+
+#include "src/compiler/bytecode-branch-analysis.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/operator-properties.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Helper for generating frame states for before and after a bytecode.
+class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
+ public:
+ FrameStateBeforeAndAfter(BytecodeGraphBuilder* builder,
+ const interpreter::BytecodeArrayIterator& iterator)
+ : builder_(builder),
+ id_after_(BailoutId::None()),
+ added_to_node_(false),
+ output_poke_offset_(0),
+ output_poke_count_(0) {
+ BailoutId id_before(iterator.current_offset());
+ frame_state_before_ = builder_->environment()->Checkpoint(
+ id_before, OutputFrameStateCombine::Ignore());
+ id_after_ = BailoutId(id_before.ToInt() + iterator.current_bytecode_size());
+ }
+
+ ~FrameStateBeforeAndAfter() {
+ DCHECK(added_to_node_);
+ DCHECK(builder_->environment()->StateValuesAreUpToDate(output_poke_offset_,
+ output_poke_count_));
+ }
+
+ private:
+ friend class Environment;
+
+ void AddToNode(Node* node, OutputFrameStateCombine combine) {
+ DCHECK(!added_to_node_);
+ int count = OperatorProperties::GetFrameStateInputCount(node->op());
+ DCHECK_LE(count, 2);
+ if (count >= 1) {
+ // Add the frame state for after the operation.
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ Node* frame_state_after =
+ builder_->environment()->Checkpoint(id_after_, combine);
+ NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_after);
+ }
+
+ if (count >= 2) {
+ // Add the frame state for before the operation.
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 1)->opcode());
+ NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
+ }
+
+ if (!combine.IsOutputIgnored()) {
+ output_poke_offset_ = static_cast<int>(combine.GetOffsetToPokeAt());
+ output_poke_count_ = node->op()->ValueOutputCount();
+ }
+ added_to_node_ = true;
+ }
+
+ BytecodeGraphBuilder* builder_;
+ Node* frame_state_before_;
+ BailoutId id_after_;
+
+ bool added_to_node_;
+ int output_poke_offset_;
+ int output_poke_count_;
+};
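+
+
+// Typical use (see e.g. BuildLoadGlobal below): construct the helper before
+// building the node, then bind through the environment so the frame states
+// are attached and the poked outputs are recorded:
+//
+//   FrameStateBeforeAndAfter states(this, iterator);
+//   Node* node = NewNode(op, ...);
+//   environment()->BindAccumulator(node, &states);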
+
+
+// Issues:
+// - Scopes - intimately tied to AST. Need to evaluate what is needed.
+// - Need to resolve closure parameter treatment.
+BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
+ int register_count,
+ int parameter_count,
+ Node* control_dependency,
+ Node* context)
+ : builder_(builder),
+ register_count_(register_count),
+ parameter_count_(parameter_count),
+ context_(context),
+ control_dependency_(control_dependency),
+ effect_dependency_(control_dependency),
+ values_(builder->local_zone()),
+ parameters_state_values_(nullptr),
+ registers_state_values_(nullptr),
+ accumulator_state_values_(nullptr) {
+ // The layout of values_ is:
+ //
+ // [receiver] [parameters] [registers] [accumulator]
+ //
+ // parameter[0] is the receiver (this), parameters 1..N are the
+ // parameters supplied to the method (arg0..argN-1). The accumulator
+ // is stored separately.
+
+ // Parameters including the receiver
+ for (int i = 0; i < parameter_count; i++) {
+ const char* debug_name = (i == 0) ? "%this" : nullptr;
+ const Operator* op = common()->Parameter(i, debug_name);
+ Node* parameter = builder->graph()->NewNode(op, graph()->start());
+ values()->push_back(parameter);
+ }
+
+ // Registers
+ register_base_ = static_cast<int>(values()->size());
+ Node* undefined_constant = builder->jsgraph()->UndefinedConstant();
+ values()->insert(values()->end(), register_count, undefined_constant);
+
+ // Accumulator
+ accumulator_base_ = static_cast<int>(values()->size());
+ values()->push_back(undefined_constant);
+}
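+
+
+// For example (illustrative): with a parameter count of 3 (receiver
+// included) and a register count of 2, values_ holds
+//   [p0, p1, p2, r0, r1, acc]
+// so register_base_ is 3 and accumulator_base_ is 5.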
+
+
+BytecodeGraphBuilder::Environment::Environment(
+ const BytecodeGraphBuilder::Environment* other)
+ : builder_(other->builder_),
+ register_count_(other->register_count_),
+ parameter_count_(other->parameter_count_),
+ context_(other->context_),
+ control_dependency_(other->control_dependency_),
+ effect_dependency_(other->effect_dependency_),
+ values_(other->zone()),
+ parameters_state_values_(nullptr),
+ registers_state_values_(nullptr),
+ accumulator_state_values_(nullptr),
+ register_base_(other->register_base_),
+ accumulator_base_(other->accumulator_base_) {
+ values_ = other->values_;
+}
+
+
+int BytecodeGraphBuilder::Environment::RegisterToValuesIndex(
+ interpreter::Register the_register) const {
+ if (the_register.is_parameter()) {
+ return the_register.ToParameterIndex(parameter_count());
+ } else {
+ return the_register.index() + register_base();
+ }
+}
+
+
+Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
+ return values()->at(accumulator_base_);
+}
+
+
+Node* BytecodeGraphBuilder::Environment::LookupRegister(
+ interpreter::Register the_register) const {
+ if (the_register.is_function_context()) {
+ return builder()->GetFunctionContext();
+ } else if (the_register.is_function_closure()) {
+ return builder()->GetFunctionClosure();
+ } else if (the_register.is_new_target()) {
+ return builder()->GetNewTarget();
+ } else {
+ int values_index = RegisterToValuesIndex(the_register);
+ return values()->at(values_index);
+ }
+}
+
+
+void BytecodeGraphBuilder::Environment::ExchangeRegisters(
+ interpreter::Register reg0, interpreter::Register reg1) {
+ int reg0_index = RegisterToValuesIndex(reg0);
+ int reg1_index = RegisterToValuesIndex(reg1);
+ Node* saved_reg0_value = values()->at(reg0_index);
+ values()->at(reg0_index) = values()->at(reg1_index);
+ values()->at(reg1_index) = saved_reg0_value;
+}
+
+
+void BytecodeGraphBuilder::Environment::BindAccumulator(
+ Node* node, FrameStateBeforeAndAfter* states) {
+ if (states) {
+ states->AddToNode(node, OutputFrameStateCombine::PokeAt(0));
+ }
+ values()->at(accumulator_base_) = node;
+}
+
+
+void BytecodeGraphBuilder::Environment::BindRegister(
+ interpreter::Register the_register, Node* node,
+ FrameStateBeforeAndAfter* states) {
+ int values_index = RegisterToValuesIndex(the_register);
+ if (states) {
+ states->AddToNode(node, OutputFrameStateCombine::PokeAt(accumulator_base_ -
+ values_index));
+ }
+ values()->at(values_index) = node;
+}
+
+
+void BytecodeGraphBuilder::Environment::BindRegistersToProjections(
+ interpreter::Register first_reg, Node* node,
+ FrameStateBeforeAndAfter* states) {
+ int values_index = RegisterToValuesIndex(first_reg);
+ if (states) {
+ states->AddToNode(node, OutputFrameStateCombine::PokeAt(accumulator_base_ -
+ values_index));
+ }
+ for (int i = 0; i < node->op()->ValueOutputCount(); i++) {
+ values()->at(values_index + i) =
+ builder()->NewNode(common()->Projection(i), node);
+ }
+}
+
+
+void BytecodeGraphBuilder::Environment::RecordAfterState(
+ Node* node, FrameStateBeforeAndAfter* states) {
+ states->AddToNode(node, OutputFrameStateCombine::Ignore());
+}
+
+
+bool BytecodeGraphBuilder::Environment::IsMarkedAsUnreachable() const {
+ return GetControlDependency()->opcode() == IrOpcode::kDead;
+}
+
+
+void BytecodeGraphBuilder::Environment::MarkAsUnreachable() {
+ UpdateControlDependency(builder()->jsgraph()->Dead());
+}
+
+
+BytecodeGraphBuilder::Environment*
+BytecodeGraphBuilder::Environment::CopyForLoop() {
+ PrepareForLoop();
+ return new (zone()) Environment(this);
+}
+
+
+BytecodeGraphBuilder::Environment*
+BytecodeGraphBuilder::Environment::CopyForConditional() const {
+ return new (zone()) Environment(this);
+}
+
+
+void BytecodeGraphBuilder::Environment::Merge(
+ BytecodeGraphBuilder::Environment* other) {
+ // Nothing to do if the other environment is dead.
+ if (other->IsMarkedAsUnreachable()) {
+ return;
+ }
+
+ // Create a merge of the control dependencies of both environments and update
+ // the current environment's control dependency accordingly.
+ Node* control = builder()->MergeControl(GetControlDependency(),
+ other->GetControlDependency());
+ UpdateControlDependency(control);
+
+ // Create a merge of the effect dependencies of both environments and update
+ // the current environment's effect dependency accordingly.
+ Node* effect = builder()->MergeEffect(GetEffectDependency(),
+ other->GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+
+ // Introduce Phi nodes for values that have differing input at merge points,
+ // potentially extending an existing Phi node if possible.
+ context_ = builder()->MergeValue(context_, other->context_, control);
+ for (size_t i = 0; i < values_.size(); i++) {
+ values_[i] = builder()->MergeValue(values_[i], other->values_[i], control);
+ }
+}
+
+
+void BytecodeGraphBuilder::Environment::PrepareForLoop() {
+ // Create a control node for the loop header.
+ Node* control = builder()->NewLoop();
+
+ // Create a Phi for external effects.
+ Node* effect = builder()->NewEffectPhi(1, GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+
+ // Assume everything in the loop is updated.
+ context_ = builder()->NewPhi(1, context_, control);
+ int size = static_cast<int>(values()->size());
+ for (int i = 0; i < size; i++) {
+ values()->at(i) = builder()->NewPhi(1, values()->at(i), control);
+ }
+
+ // Connect to the loop end.
+ Node* terminate = builder()->graph()->NewNode(
+ builder()->common()->Terminate(), effect, control);
+ builder()->exit_controls_.push_back(terminate);
+}
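+
+
+// After PrepareForLoop (illustrative), the context and every environment
+// value {v} has become a one-input Phi on the new Loop control; the back
+// edge and the Phis' second inputs are filled in later when the loop end
+// merges back into this environment.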
+
+
+bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
+ Node** state_values, int offset, int count) {
+ if (!builder()->info()->is_deoptimization_enabled()) {
+ return false;
+ }
+ if (*state_values == nullptr) {
+ return true;
+ }
+ DCHECK_EQ((*state_values)->InputCount(), count);
+ DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
+ Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
+ for (int i = 0; i < count; i++) {
+ if ((*state_values)->InputAt(i) != env_values[i]) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
+ int offset,
+ int count) {
+ if (StateValuesRequireUpdate(state_values, offset, count)) {
+ const Operator* op = common()->StateValues(count);
+ (*state_values) = graph()->NewNode(op, count, &values()->at(offset));
+ }
+}
+
+
+Node* BytecodeGraphBuilder::Environment::Checkpoint(
+ BailoutId bailout_id, OutputFrameStateCombine combine) {
+ if (!builder()->info()->is_deoptimization_enabled()) {
+ return builder()->jsgraph()->EmptyFrameState();
+ }
+
+ // TODO(rmcilroy): Consider using StateValuesCache for some state values.
+  UpdateStateValues(&parameters_state_values_, 0, parameter_count());
+  UpdateStateValues(&registers_state_values_, register_base(),
+ register_count());
+ UpdateStateValues(&accumulator_state_values_, accumulator_base(), 1);
+
+ const Operator* op = common()->FrameState(
+ bailout_id, combine, builder()->frame_state_function_info());
+ Node* result = graph()->NewNode(
+ op, parameters_state_values_, registers_state_values_,
+ accumulator_state_values_, Context(), builder()->GetFunctionClosure(),
+ builder()->graph()->start());
+
+ return result;
+}
+
+
+bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
+ Node** state_values, int offset, int count, int output_poke_start,
+ int output_poke_end) {
+ DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
+ for (int i = 0; i < count; i++, offset++) {
+ if (offset < output_poke_start || offset >= output_poke_end) {
+ if ((*state_values)->InputAt(i) != values()->at(offset)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
+bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
+ int output_poke_offset, int output_poke_count) {
+ // Poke offset is relative to the top of the stack (i.e., the accumulator).
+ int output_poke_start = accumulator_base() - output_poke_offset;
+ int output_poke_end = output_poke_start + output_poke_count;
+  return StateValuesAreUpToDate(&parameters_state_values_, 0, parameter_count(),
+ output_poke_start, output_poke_end) &&
+         StateValuesAreUpToDate(&registers_state_values_, register_base(),
+ register_count(), output_poke_start,
+ output_poke_end) &&
+ StateValuesAreUpToDate(&accumulator_state_values_, accumulator_base(),
+ 1, output_poke_start, output_poke_end);
+}
+
+
+BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
+ CompilationInfo* compilation_info,
+ JSGraph* jsgraph)
+ : local_zone_(local_zone),
+ info_(compilation_info),
+ jsgraph_(jsgraph),
+ bytecode_array_(handle(info()->shared_info()->bytecode_array())),
+ frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kInterpretedFunction,
+ bytecode_array()->parameter_count(),
+ bytecode_array()->register_count(), info()->shared_info(),
+ CALL_MAINTAINS_NATIVE_CONTEXT)),
+ merge_environments_(local_zone),
+ loop_header_environments_(local_zone),
+ input_buffer_size_(0),
+ input_buffer_(nullptr),
+ exit_controls_(local_zone) {}
+
+
+Node* BytecodeGraphBuilder::GetNewTarget() {
+ if (!new_target_.is_set()) {
+ int params = bytecode_array()->parameter_count();
+ int index = Linkage::GetJSCallNewTargetParamIndex(params);
+ const Operator* op = common()->Parameter(index, "%new.target");
+ Node* node = NewNode(op, graph()->start());
+ new_target_.set(node);
+ }
+ return new_target_.get();
+}
+
+
+Node* BytecodeGraphBuilder::GetFunctionContext() {
+ if (!function_context_.is_set()) {
+ int params = bytecode_array()->parameter_count();
+ int index = Linkage::GetJSCallContextParamIndex(params);
+ const Operator* op = common()->Parameter(index, "%context");
+ Node* node = NewNode(op, graph()->start());
+ function_context_.set(node);
+ }
+ return function_context_.get();
+}
+
+
+Node* BytecodeGraphBuilder::GetFunctionClosure() {
+ if (!function_closure_.is_set()) {
+ int index = Linkage::kJSCallClosureParamIndex;
+ const Operator* op = common()->Parameter(index, "%closure");
+ Node* node = NewNode(op, graph()->start());
+ function_closure_.set(node);
+ }
+ return function_closure_.get();
+}
+
+
+Node* BytecodeGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
+ return NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()), object,
+ jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
+}
+
+
+Node* BytecodeGraphBuilder::BuildLoadImmutableObjectField(Node* object,
+ int offset) {
+ return graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
+ object,
+ jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
+ graph()->start(), graph()->start());
+}
+
+
+Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
+ const Operator* op =
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
+ Node* native_context = NewNode(op, environment()->Context());
+ return NewNode(javascript()->LoadContext(0, index, true), native_context);
+}
+
+
+Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {
+ if (!feedback_vector_.is_set()) {
+ Node* closure = GetFunctionClosure();
+ Node* shared = BuildLoadImmutableObjectField(
+ closure, JSFunction::kSharedFunctionInfoOffset);
+ Node* vector = BuildLoadImmutableObjectField(
+ shared, SharedFunctionInfo::kFeedbackVectorOffset);
+ feedback_vector_.set(vector);
+ }
+ return feedback_vector_.get();
+}
+
+
+VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
+ Handle<TypeFeedbackVector> feedback_vector = info()->feedback_vector();
+ FeedbackVectorSlot slot;
+ if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
+ slot = feedback_vector->ToSlot(slot_id);
+ }
+ return VectorSlotPair(feedback_vector, slot);
+}
+
+
+bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
+  // Set up the basic structure of the graph. Outputs for {Start} are the
+  // formal parameters (including the receiver) plus new target, number of
+  // arguments, context and closure.
+ int actual_parameter_count = bytecode_array()->parameter_count() + 4;
+ graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
+
+ Environment env(this, bytecode_array()->register_count(),
+ bytecode_array()->parameter_count(), graph()->start(),
+ GetFunctionContext());
+ set_environment(&env);
+
+ CreateGraphBody(stack_check);
+
+ // Finish the basic structure of the graph.
+ DCHECK_NE(0u, exit_controls_.size());
+ int const input_count = static_cast<int>(exit_controls_.size());
+ Node** const inputs = &exit_controls_.front();
+ Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
+ graph()->SetEnd(end);
+
+ return true;
+}
+
+
+void BytecodeGraphBuilder::CreateGraphBody(bool stack_check) {
+ // TODO(oth): Review ast-graph-builder equivalent, i.e. arguments
+ // object setup, this function variable if used, tracing hooks.
+
+ if (stack_check) {
+ Node* node = NewNode(javascript()->StackCheck());
+ PrepareEntryFrameState(node);
+ }
+
+ VisitBytecodes();
+}
+
+
+void BytecodeGraphBuilder::VisitBytecodes() {
+ BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
+ analysis.Analyze();
+ set_branch_analysis(&analysis);
+ interpreter::BytecodeArrayIterator iterator(bytecode_array());
+ set_bytecode_iterator(&iterator);
+ while (!iterator.done()) {
+ int current_offset = iterator.current_offset();
+ if (analysis.is_reachable(current_offset)) {
+ MergeEnvironmentsOfForwardBranches(current_offset);
+ BuildLoopHeaderForBackwardBranches(current_offset);
+
+ switch (iterator.current_bytecode()) {
+#define BYTECODE_CASE(name, ...) \
+ case interpreter::Bytecode::k##name: \
+ Visit##name(iterator); \
+ break;
+ BYTECODE_LIST(BYTECODE_CASE)
+#undef BYTECODE_CASE
+ }
+ }
+ iterator.Advance();
+ }
+ set_branch_analysis(nullptr);
+ set_bytecode_iterator(nullptr);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaZero(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->ZeroConstant();
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaSmi8(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->Constant(iterator.GetImmediateOperand(0));
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaUndefined(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->UndefinedConstant();
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaNull(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->NullConstant();
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaTheHole(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->TheHoleConstant();
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaTrue(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->TrueConstant();
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaFalse(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->FalseConstant();
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdar(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* value = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ environment()->BindAccumulator(value);
+}
+
+
+void BytecodeGraphBuilder::VisitStar(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* value = environment()->LookupAccumulator();
+ environment()->BindRegister(iterator.GetRegisterOperand(0), value);
+}
+
+
+void BytecodeGraphBuilder::VisitMov(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* value = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ environment()->BindRegister(iterator.GetRegisterOperand(1), value);
+}
+
+
+void BytecodeGraphBuilder::VisitExchange(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ environment()->ExchangeRegisters(iterator.GetRegisterOperand(0),
+ iterator.GetRegisterOperand(1));
+}
+
+
+void BytecodeGraphBuilder::VisitExchangeWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ environment()->ExchangeRegisters(iterator.GetRegisterOperand(0),
+ iterator.GetRegisterOperand(1));
+}
+
+
+void BytecodeGraphBuilder::BuildLoadGlobal(
+ const interpreter::BytecodeArrayIterator& iterator,
+ TypeofMode typeof_mode) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Handle<Name> name =
+ Handle<Name>::cast(iterator.GetConstantForIndexOperand(0));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+
+ const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
+ Node* node = NewNode(op, BuildLoadFeedbackVector());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::BuildStoreGlobal(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Handle<Name> name =
+ Handle<Name>::cast(iterator.GetConstantForIndexOperand(0));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+ Node* value = environment()->LookupAccumulator();
+
+ const Operator* op =
+ javascript()->StoreGlobal(language_mode(), name, feedback);
+ Node* node = NewNode(op, value, BuildLoadFeedbackVector());
+ environment()->RecordAfterState(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitStaGlobalSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildStoreGlobal(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaGlobalStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildStoreGlobal(iterator);
+}
+
+void BytecodeGraphBuilder::VisitStaGlobalSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildStoreGlobal(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaGlobalStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildStoreGlobal(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaContextSlot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+  // TODO(mythria): LoadContextSlots are unrolled by the required depth when
+  // generating bytecode, so the value of depth is always 0. Update this code
+  // when the implementation changes.
+  // TODO(mythria): The immutable flag is also set to false because this
+  // information is not available in the bytecode array. Update this code when
+  // the implementation changes.
+ const Operator* op =
+ javascript()->LoadContext(0, iterator.GetIndexOperand(1), false);
+ Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* node = NewNode(op, context);
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaContextSlotWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitLdaContextSlot(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaContextSlot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+  // TODO(mythria): StoreContextSlots are unrolled by the required depth when
+  // generating bytecode, so the value of depth is always 0. Update this code
+  // when the implementation changes.
+ const Operator* op =
+ javascript()->StoreContext(0, iterator.GetIndexOperand(1));
+ Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* value = environment()->LookupAccumulator();
+ NewNode(op, context, value);
+}
+
+
+void BytecodeGraphBuilder::VisitStaContextSlotWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitStaContextSlot(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildLdaLookupSlot(
+ TypeofMode typeof_mode,
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Handle<String> name =
+ Handle<String>::cast(iterator.GetConstantForIndexOperand(0));
+ const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
+ Node* value =
+ NewNode(op, BuildLoadFeedbackVector(), environment()->Context());
+ environment()->BindAccumulator(value, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaLookupSlot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildLdaLookupSlot(TypeofMode::NOT_INSIDE_TYPEOF, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeof(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildLdaLookupSlot(TypeofMode::INSIDE_TYPEOF, iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildStaLookupSlot(
+ LanguageMode language_mode,
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* value = environment()->LookupAccumulator();
+ Node* name = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
+ Node* language = jsgraph()->Constant(language_mode);
+ const Operator* op = javascript()->CallRuntime(Runtime::kStoreLookupSlot, 4);
+ Node* store = NewNode(op, value, environment()->Context(), name, language);
+ environment()->BindAccumulator(store, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaLookupSlotWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitLdaLookupSlot(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeofWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitLdaLookupSlotInsideTypeof(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaLookupSlotSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildStaLookupSlot(LanguageMode::SLOPPY, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaLookupSlotStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildStaLookupSlot(LanguageMode::STRICT, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaLookupSlotSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitStaLookupSlotSloppy(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaLookupSlotStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitStaLookupSlotStrict(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildNamedLoad(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Handle<Name> name =
+ Handle<Name>::cast(iterator.GetConstantForIndexOperand(1));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+
+ const Operator* op = javascript()->LoadNamed(language_mode(), name, feedback);
+ Node* node = NewNode(op, object, BuildLoadFeedbackVector());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildNamedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildNamedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildNamedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildNamedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildKeyedLoad(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* key = environment()->LookupAccumulator();
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+
+ const Operator* op = javascript()->LoadProperty(language_mode(), feedback);
+ Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildKeyedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildKeyedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildKeyedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildKeyedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildNamedStore(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* value = environment()->LookupAccumulator();
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Handle<Name> name =
+ Handle<Name>::cast(iterator.GetConstantForIndexOperand(1));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+
+ const Operator* op =
+ javascript()->StoreNamed(language_mode(), name, feedback);
+ Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+ environment()->RecordAfterState(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitStoreICSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildNamedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStoreICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildNamedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStoreICSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildNamedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStoreICStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildNamedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildKeyedStore(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* value = environment()->LookupAccumulator();
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* key = environment()->LookupRegister(iterator.GetRegisterOperand(1));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+
+ const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
+ Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
+ environment()->RecordAfterState(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildKeyedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedStoreICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildKeyedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildKeyedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildKeyedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitPushContext(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* context = environment()->LookupAccumulator();
+ environment()->BindRegister(iterator.GetRegisterOperand(0), context);
+ environment()->SetContext(context);
+}
+
+
+void BytecodeGraphBuilder::VisitPopContext(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ environment()->SetContext(context);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateClosure(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Handle<SharedFunctionInfo> shared_info =
+ Handle<SharedFunctionInfo>::cast(iterator.GetConstantForIndexOperand(0));
+ PretenureFlag tenured =
+ iterator.GetImmediateOperand(1) ? TENURED : NOT_TENURED;
+ const Operator* op = javascript()->CreateClosure(shared_info, tenured);
+ Node* closure = NewNode(op);
+ environment()->BindAccumulator(closure);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateClosureWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitCreateClosure(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateArguments(
+ CreateArgumentsParameters::Type type,
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ const Operator* op = javascript()->CreateArguments(type, 0);
+ Node* object = NewNode(op, GetFunctionClosure());
+ environment()->BindAccumulator(object, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateMappedArguments(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateArguments(CreateArgumentsParameters::kMappedArguments, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateUnmappedArguments(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateArguments(CreateArgumentsParameters::kUnmappedArguments, iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateLiteral(
+ const Operator* op, const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* literal = NewNode(op, GetFunctionClosure());
+ environment()->BindAccumulator(literal, &states);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateRegExpLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Handle<String> constant_pattern =
+ Handle<String>::cast(iterator.GetConstantForIndexOperand(0));
+ int literal_index = iterator.GetIndexOperand(1);
+ int literal_flags = iterator.GetImmediateOperand(2);
+ const Operator* op = javascript()->CreateLiteralRegExp(
+ constant_pattern, literal_flags, literal_index);
+ BuildCreateLiteral(op, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateRegExpLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateRegExpLiteral(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateRegExpLiteralWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateRegExpLiteral(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateArrayLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Handle<FixedArray> constant_elements =
+ Handle<FixedArray>::cast(iterator.GetConstantForIndexOperand(0));
+ int literal_index = iterator.GetIndexOperand(1);
+ int literal_flags = iterator.GetImmediateOperand(2);
+ const Operator* op = javascript()->CreateLiteralArray(
+ constant_elements, literal_flags, literal_index);
+ BuildCreateLiteral(op, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateArrayLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateArrayLiteral(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateArrayLiteralWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateArrayLiteral(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateObjectLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Handle<FixedArray> constant_properties =
+ Handle<FixedArray>::cast(iterator.GetConstantForIndexOperand(0));
+ int literal_index = iterator.GetIndexOperand(1);
+ int literal_flags = iterator.GetImmediateOperand(2);
+ const Operator* op = javascript()->CreateLiteralObject(
+ constant_properties, literal_flags, literal_index);
+ BuildCreateLiteral(op, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateObjectLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateObjectLiteral(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateObjectLiteralWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateObjectLiteral(iterator);
+}
+
+
+Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
+ Node* callee,
+ interpreter::Register receiver,
+ size_t arity) {
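+  // The argument buffer is laid out as [callee, receiver, arg0, ..., argN];
+  // {arity} counts the callee and receiver slots as well as the arguments.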
+ Node** all = info()->zone()->NewArray<Node*>(static_cast<int>(arity));
+ all[0] = callee;
+ all[1] = environment()->LookupRegister(receiver);
+ int receiver_index = receiver.index();
+ for (int i = 2; i < static_cast<int>(arity); ++i) {
+ all[i] = environment()->LookupRegister(
+ interpreter::Register(receiver_index + i - 1));
+ }
+ Node* value = MakeNode(call_op, static_cast<int>(arity), all, false);
+ return value;
+}
+
+
+void BytecodeGraphBuilder::BuildCall(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ // TODO(rmcilroy): Set receiver_hint correctly based on whether the receiver
+ // register has been loaded with null / undefined explicitly or we are sure it
+ // is not null / undefined.
+ ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
+ Node* callee = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ interpreter::Register receiver = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(3));
+
+ const Operator* call = javascript()->CallFunction(
+ arg_count + 2, language_mode(), feedback, receiver_hint);
+ Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 2);
+ environment()->BindAccumulator(value, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitCall(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCall(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCallWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCall(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCallJSRuntime(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* callee = BuildLoadNativeContextField(iterator.GetIndexOperand(0));
+ interpreter::Register receiver = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+
+ // Create node to perform the JS runtime call.
+ const Operator* call =
+ javascript()->CallFunction(arg_count + 2, language_mode());
+ Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 2);
+ environment()->BindAccumulator(value, &states);
+}
+
+
+Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
+ const Operator* call_runtime_op, interpreter::Register first_arg,
+ size_t arity) {
+ Node** all = info()->zone()->NewArray<Node*>(arity);
+ int first_arg_index = first_arg.index();
+ for (int i = 0; i < static_cast<int>(arity); ++i) {
+ all[i] = environment()->LookupRegister(
+ interpreter::Register(first_arg_index + i));
+ }
+ Node* value = MakeNode(call_runtime_op, static_cast<int>(arity), all, false);
+ return value;
+}
+
+
+void BytecodeGraphBuilder::VisitCallRuntime(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Runtime::FunctionId functionId =
+ static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0));
+ interpreter::Register first_arg = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+
+ // Create node to perform the runtime call.
+ const Operator* call = javascript()->CallRuntime(functionId, arg_count);
+ Node* value = ProcessCallRuntimeArguments(call, first_arg, arg_count);
+ environment()->BindAccumulator(value, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitCallRuntimeForPair(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Runtime::FunctionId functionId =
+ static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0));
+ interpreter::Register first_arg = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+ interpreter::Register first_return = iterator.GetRegisterOperand(3);
+
+ // Create node to perform the runtime call.
+ const Operator* call = javascript()->CallRuntime(functionId, arg_count);
+ Node* return_pair = ProcessCallRuntimeArguments(call, first_arg, arg_count);
+ environment()->BindRegistersToProjections(first_return, return_pair, &states);
+}
+
+
+Node* BytecodeGraphBuilder::ProcessCallNewArguments(
+ const Operator* call_new_op, interpreter::Register callee,
+ interpreter::Register first_arg, size_t arity) {
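+  // The argument buffer is laid out as [callee, arg0, ..., argN, new.target];
+  // {arity} counts the callee and the trailing new.target slot as well.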
+ Node** all = info()->zone()->NewArray<Node*>(arity);
+ all[0] = environment()->LookupRegister(callee);
+ int first_arg_index = first_arg.index();
+ for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
+ all[i] = environment()->LookupRegister(
+ interpreter::Register(first_arg_index + i - 1));
+ }
+ // Original constructor is the same as the callee.
+ all[arity - 1] = environment()->LookupRegister(callee);
+ Node* value = MakeNode(call_new_op, static_cast<int>(arity), all, false);
+ return value;
+}
+
+
+void BytecodeGraphBuilder::VisitNew(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ interpreter::Register callee = iterator.GetRegisterOperand(0);
+ interpreter::Register first_arg = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+
+ // TODO(turbofan): Pass the feedback here.
+ const Operator* call = javascript()->CallConstruct(
+ static_cast<int>(arg_count) + 2, VectorSlotPair());
+ Node* value = ProcessCallNewArguments(call, callee, first_arg, arg_count + 2);
+ environment()->BindAccumulator(value, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitThrow(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* value = environment()->LookupAccumulator();
+ // TODO(mythria): Change to Runtime::kThrow when we have deoptimization
+ // information support in the interpreter.
+ NewNode(javascript()->CallRuntime(Runtime::kReThrow, 1), value);
+ Node* control = NewNode(common()->Throw(), value);
+ environment()->RecordAfterState(control, &states);
+ UpdateControlDependencyToLeaveFunction(control);
+}
+
+
+void BytecodeGraphBuilder::BuildBinaryOp(
+ const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* right = environment()->LookupAccumulator();
+ Node* node = NewNode(js_op, left, right);
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitAdd(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Add(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitSub(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Subtract(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitMul(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Multiply(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitDiv(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Divide(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitMod(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Modulus(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitBitwiseOr(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->BitwiseOr(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitBitwiseXor(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->BitwiseXor(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitBitwiseAnd(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->BitwiseAnd(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitShiftLeft(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->ShiftLeft(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitShiftRight(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->ShiftRight(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitShiftRightLogical(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->ShiftRightLogical(language_mode(), hints),
+ iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitInc(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ const Operator* js_op =
+ javascript()->Add(language_mode(), BinaryOperationHints::Any());
+ Node* node = NewNode(js_op, environment()->LookupAccumulator(),
+ jsgraph()->OneConstant());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitDec(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ const Operator* js_op =
+ javascript()->Subtract(language_mode(), BinaryOperationHints::Any());
+ Node* node = NewNode(js_op, environment()->LookupAccumulator(),
+ jsgraph()->OneConstant());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitLogicalNot(
+ const interpreter::BytecodeArrayIterator& iterator) {
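+  // Logical negation is expressed as ToBoolean followed by a Select with the
+  // true/false constants swapped, avoiding a dedicated negation operator.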
+ Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ environment()->LookupAccumulator());
+ Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
+ jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitTypeOf(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node =
+ NewNode(javascript()->TypeOf(), environment()->LookupAccumulator());
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::BuildDelete(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* key = environment()->LookupAccumulator();
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* node =
+ NewNode(javascript()->DeleteProperty(language_mode()), object, key);
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitDeletePropertyStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildDelete(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitDeletePropertySloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildDelete(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitDeleteLookupSlot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* name = environment()->LookupAccumulator();
+ const Operator* op = javascript()->CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ Node* result = NewNode(op, environment()->Context(), name);
+ environment()->BindAccumulator(result, &states);
+}
+
+
+void BytecodeGraphBuilder::BuildCompareOp(
+ const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* right = environment()->LookupAccumulator();
+ Node* node = NewNode(js_op, left, right);
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitTestEqual(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCompareOp(javascript()->Equal(), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitTestNotEqual(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCompareOp(javascript()->NotEqual(), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitTestEqualStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCompareOp(javascript()->StrictEqual(), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitTestNotEqualStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCompareOp(javascript()->StrictNotEqual(), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitTestLessThan(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCompareOp(javascript()->LessThan(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitTestGreaterThan(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCompareOp(javascript()->GreaterThan(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitTestLessThanOrEqual(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCompareOp(javascript()->LessThanOrEqual(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCompareOp(javascript()->GreaterThanOrEqual(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitTestIn(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCompareOp(javascript()->HasProperty(), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitTestInstanceOf(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCompareOp(javascript()->InstanceOf(), iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildCastOperator(
+ const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* node = NewNode(js_op, environment()->LookupAccumulator());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitToName(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCastOperator(javascript()->ToName(), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitToObject(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCastOperator(javascript()->ToObject(), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitToNumber(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCastOperator(javascript()->ToNumber(), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitJump(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJump();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJump();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJump();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfTrue(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->TrueConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfTrueConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->TrueConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfTrueConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->TrueConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfFalse(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfFalseConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfFalseConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfNull(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->NullConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfNullConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->NullConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfNullConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->NullConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfUndefined(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->UndefinedConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->UndefinedConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfUndefinedConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->UndefinedConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitReturn(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* control =
+ NewNode(common()->Return(), environment()->LookupAccumulator());
+ UpdateControlDependencyToLeaveFunction(control);
+ set_environment(nullptr);
+}
+
+
+void BytecodeGraphBuilder::VisitForInPrepare(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* prepare = nullptr;
+ {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* receiver = environment()->LookupAccumulator();
+ prepare = NewNode(javascript()->ForInPrepare(), receiver);
+ environment()->RecordAfterState(prepare, &states);
+ }
+  // Project cache_type, cache_array, and cache_length into the registers
+  // given by operands 0, 1, and 2.
+ for (int i = 0; i < 3; i++) {
+ environment()->BindRegister(iterator.GetRegisterOperand(i),
+ NewNode(common()->Projection(i), prepare));
+ }
+}
+
+
+void BytecodeGraphBuilder::VisitForInDone(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* cache_length =
+ environment()->LookupRegister(iterator.GetRegisterOperand(1));
+ Node* exit_cond = NewNode(javascript()->ForInDone(), index, cache_length);
+ environment()->BindAccumulator(exit_cond, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitForInNext(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* receiver =
+ environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* cache_type =
+ environment()->LookupRegister(iterator.GetRegisterOperand(1));
+ Node* cache_array =
+ environment()->LookupRegister(iterator.GetRegisterOperand(2));
+ Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(3));
+ Node* value = NewNode(javascript()->ForInNext(), receiver, cache_array,
+ cache_type, index);
+ environment()->BindAccumulator(value, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitForInStep(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ index = NewNode(javascript()->ForInStep(), index);
+ environment()->BindAccumulator(index, &states);
+}
+
+
+void BytecodeGraphBuilder::MergeEnvironmentsOfBackwardBranches(
+ int source_offset, int target_offset) {
+ DCHECK_GE(source_offset, target_offset);
+ const ZoneVector<int>* branch_sites =
+ branch_analysis()->BackwardBranchesTargetting(target_offset);
+ if (branch_sites->back() == source_offset) {
+ // The set of back branches is complete, merge them.
+ DCHECK_GE(branch_sites->at(0), target_offset);
+ Environment* merged = merge_environments_[branch_sites->at(0)];
+ for (size_t i = 1; i < branch_sites->size(); i++) {
+ DCHECK_GE(branch_sites->at(i), target_offset);
+ merged->Merge(merge_environments_[branch_sites->at(i)]);
+ }
+    // Now merge with the loop header environment that was created when the
+    // loop header was visited.
+ loop_header_environments_[target_offset]->Merge(merged);
+ }
+}
+
+
+void BytecodeGraphBuilder::MergeEnvironmentsOfForwardBranches(
+ int source_offset) {
+ if (branch_analysis()->forward_branches_target(source_offset)) {
+ // Merge environments of branches that reach this bytecode.
+ auto branch_sites =
+ branch_analysis()->ForwardBranchesTargetting(source_offset);
+ DCHECK_LT(branch_sites->at(0), source_offset);
+ Environment* merged = merge_environments_[branch_sites->at(0)];
+ for (size_t i = 1; i < branch_sites->size(); i++) {
+ DCHECK_LT(branch_sites->at(i), source_offset);
+ merged->Merge(merge_environments_[branch_sites->at(i)]);
+ }
+ if (environment()) {
+ merged->Merge(environment());
+ }
+ set_environment(merged);
+ }
+}
+
+
+void BytecodeGraphBuilder::BuildLoopHeaderForBackwardBranches(
+ int source_offset) {
+ if (branch_analysis()->backward_branches_target(source_offset)) {
+ // Add loop header and store a copy so we can connect merged back
+ // edge inputs to the loop header.
+ loop_header_environments_[source_offset] = environment()->CopyForLoop();
+ }
+}
+
+
+void BytecodeGraphBuilder::BuildJump(int source_offset, int target_offset) {
+ DCHECK_NULL(merge_environments_[source_offset]);
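+  // Record the environment at the jump site so it can be merged into the
+  // target's environment when the target bytecode is reached.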
+ merge_environments_[source_offset] = environment();
+ if (source_offset >= target_offset) {
+ MergeEnvironmentsOfBackwardBranches(source_offset, target_offset);
+ }
+ set_environment(nullptr);
+}
+
+
+void BytecodeGraphBuilder::BuildJump() {
+ int source_offset = bytecode_iterator()->current_offset();
+ int target_offset = bytecode_iterator()->GetJumpTargetOffset();
+ BuildJump(source_offset, target_offset);
+}
+
+
+void BytecodeGraphBuilder::BuildConditionalJump(Node* condition) {
+ int source_offset = bytecode_iterator()->current_offset();
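+  // Split control flow on {condition}: the true projection takes the jump
+  // (consuming the current environment), while execution falls through in a
+  // copy of the environment wired to the false projection.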
+ NewBranch(condition);
+ Environment* if_false_environment = environment()->CopyForConditional();
+ NewIfTrue();
+ BuildJump(source_offset, bytecode_iterator()->GetJumpTargetOffset());
+ set_environment(if_false_environment);
+ NewIfFalse();
+}
+
+
+void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comparand) {
+  Node* accumulator = environment()->LookupAccumulator();
+  Node* condition =
+      NewNode(javascript()->StrictEqual(), accumulator, comparand);
+  BuildConditionalJump(condition);
+}
+
+
+void BytecodeGraphBuilder::BuildJumpIfToBooleanEqual(Node* comparand) {
+  Node* accumulator = environment()->LookupAccumulator();
+  Node* to_boolean =
+      NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
+  Node* condition = NewNode(javascript()->StrictEqual(), to_boolean, comparand);
+  BuildConditionalJump(condition);
+}
+
+
+Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
+ if (size > input_buffer_size_) {
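+    // Over-allocate (the requested size plus a fixed increment plus the old
+    // capacity) so that the buffer is not reallocated on every small growth.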
+ size = size + kInputBufferSizeIncrement + input_buffer_size_;
+ input_buffer_ = local_zone()->NewArray<Node*>(size);
+ input_buffer_size_ = size;
+ }
+ return input_buffer_;
+}
+
+
+void BytecodeGraphBuilder::PrepareEntryFrameState(Node* node) {
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ NodeProperties::ReplaceFrameStateInput(
+ node, 0, environment()->Checkpoint(BailoutId(0),
+ OutputFrameStateCombine::Ignore()));
+}
+
+
+Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
+ Node** value_inputs, bool incomplete) {
+ DCHECK_EQ(op->ValueInputCount(), value_input_count);
+
+ bool has_context = OperatorProperties::HasContextInput(op);
+ int frame_state_count = OperatorProperties::GetFrameStateInputCount(op);
+ bool has_control = op->ControlInputCount() == 1;
+ bool has_effect = op->EffectInputCount() == 1;
+
+ DCHECK_LT(op->ControlInputCount(), 2);
+ DCHECK_LT(op->EffectInputCount(), 2);
+
+ Node* result = nullptr;
+ if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
+ result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
+ } else {
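+    // Assemble inputs in the order {value inputs..., context, frame
+    // states..., effect, control}, which is the layout consumed below.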
+ int input_count_with_deps = value_input_count;
+ if (has_context) ++input_count_with_deps;
+ input_count_with_deps += frame_state_count;
+ if (has_control) ++input_count_with_deps;
+ if (has_effect) ++input_count_with_deps;
+ Node** buffer = EnsureInputBufferSize(input_count_with_deps);
+ memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+ Node** current_input = buffer + value_input_count;
+ if (has_context) {
+ *current_input++ = environment()->Context();
+ }
+ for (int i = 0; i < frame_state_count; i++) {
+ // The frame state will be inserted later. Here we misuse
+ // the {Dead} node as a sentinel to be later overwritten
+ // with the real frame state.
+ *current_input++ = jsgraph()->Dead();
+ }
+ if (has_effect) {
+ *current_input++ = environment()->GetEffectDependency();
+ }
+ if (has_control) {
+ *current_input++ = environment()->GetControlDependency();
+ }
+ result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
+ if (!environment()->IsMarkedAsUnreachable()) {
+ // Update the current control dependency for control-producing nodes.
+ if (NodeProperties::IsControl(result)) {
+ environment()->UpdateControlDependency(result);
+ }
+ // Update the current effect dependency for effect-producing nodes.
+ if (result->op()->EffectOutputCount() > 0) {
+ environment()->UpdateEffectDependency(result);
+ }
+ // Add implicit success continuation for throwing nodes.
+ if (!result->op()->HasProperty(Operator::kNoThrow)) {
+ const Operator* if_success = common()->IfSuccess();
+ Node* on_success = graph()->NewNode(if_success, result);
+ environment_->UpdateControlDependency(on_success);
+ }
+ }
+ }
+
+ return result;
+}
+
+
+Node* BytecodeGraphBuilder::NewPhi(int count, Node* input, Node* control) {
+ const Operator* phi_op = common()->Phi(MachineRepresentation::kTagged, count);
+ Node** buffer = EnsureInputBufferSize(count + 1);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer, true);
+}
+
+
+Node* BytecodeGraphBuilder::NewEffectPhi(int count, Node* input,
+ Node* control) {
+ const Operator* phi_op = common()->EffectPhi(count);
+ Node** buffer = EnsureInputBufferSize(count + 1);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer, true);
+}
+
+
+Node* BytecodeGraphBuilder::MergeControl(Node* control, Node* other) {
+ int inputs = control->op()->ControlInputCount() + 1;
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Control node for loop exists, add input.
+ const Operator* op = common()->Loop(inputs);
+ control->AppendInput(graph_zone(), other);
+ NodeProperties::ChangeOp(control, op);
+ } else if (control->opcode() == IrOpcode::kMerge) {
+ // Control node for merge exists, add input.
+ const Operator* op = common()->Merge(inputs);
+ control->AppendInput(graph_zone(), other);
+ NodeProperties::ChangeOp(control, op);
+ } else {
+ // Control node is a singleton, introduce a merge.
+ const Operator* op = common()->Merge(inputs);
+ Node* merge_inputs[] = {control, other};
+ control = graph()->NewNode(op, arraysize(merge_inputs), merge_inputs, true);
+ }
+ return control;
+}
+
+
+Node* BytecodeGraphBuilder::MergeEffect(Node* value, Node* other,
+ Node* control) {
+ int inputs = control->op()->ControlInputCount();
+ if (value->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->InsertInput(graph_zone(), inputs - 1, other);
+ NodeProperties::ChangeOp(value, common()->EffectPhi(inputs));
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewEffectPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
+
+Node* BytecodeGraphBuilder::MergeValue(Node* value, Node* other,
+ Node* control) {
+ int inputs = control->op()->ControlInputCount();
+ if (value->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->InsertInput(graph_zone(), inputs - 1, other);
+ NodeProperties::ChangeOp(
+ value, common()->Phi(MachineRepresentation::kTagged, inputs));
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
+
+void BytecodeGraphBuilder::UpdateControlDependencyToLeaveFunction(Node* exit) {
+ if (environment()->IsMarkedAsUnreachable()) return;
+ environment()->MarkAsUnreachable();
+ exit_controls_.push_back(exit);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
new file mode 100644
index 0000000..94a278c
--- /dev/null
+++ b/src/compiler/bytecode-graph-builder.h
@@ -0,0 +1,344 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
+#define V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
+
+#include "src/compiler.h"
+#include "src/compiler/bytecode-branch-analysis.h"
+#include "src/compiler/js-graph.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// The BytecodeGraphBuilder produces a high-level IR graph based on
+// interpreter bytecodes.
+class BytecodeGraphBuilder {
+ public:
+ BytecodeGraphBuilder(Zone* local_zone, CompilationInfo* info,
+ JSGraph* jsgraph);
+
+ // Creates a graph by visiting bytecodes.
+ bool CreateGraph(bool stack_check = true);
+
+ Graph* graph() const { return jsgraph_->graph(); }
+
+ private:
+ class Environment;
+ class FrameStateBeforeAndAfter;
+
+ void CreateGraphBody(bool stack_check);
+ void VisitBytecodes();
+
+ Node* LoadAccumulator(Node* value);
+
+ // Get or create the node that represents the outer function closure.
+ Node* GetFunctionClosure();
+
+ // Get or create the node that represents the outer function context.
+ Node* GetFunctionContext();
+
+ // Get or create the node that represents the incoming new target value.
+ Node* GetNewTarget();
+
+ // Builder for accessing a (potentially immutable) object field.
+ Node* BuildLoadObjectField(Node* object, int offset);
+ Node* BuildLoadImmutableObjectField(Node* object, int offset);
+
+ // Builder for accessing type feedback vector.
+ Node* BuildLoadFeedbackVector();
+
+  // Builder for loading a native context field.
+ Node* BuildLoadNativeContextField(int index);
+
+ // Helper function for creating a pair containing type feedback vector and
+ // a feedback slot.
+ VectorSlotPair CreateVectorSlotPair(int slot_id);
+
+ void set_environment(Environment* env) { environment_ = env; }
+ const Environment* environment() const { return environment_; }
+ Environment* environment() { return environment_; }
+
+ // Node creation helpers
+ Node* NewNode(const Operator* op, bool incomplete = false) {
+ return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1) {
+ Node* buffer[] = {n1};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2) {
+ Node* buffer[] = {n1, n2};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* buffer[] = {n1, n2, n3};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* buffer[] = {n1, n2, n3, n4};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ // Helpers to create new control nodes.
+ Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
+ Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+ Node* NewMerge() { return NewNode(common()->Merge(1), true); }
+ Node* NewLoop() { return NewNode(common()->Loop(1), true); }
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
+ return NewNode(common()->Branch(hint), condition);
+ }
+
+ // Creates a new Phi node having {count} input values.
+ Node* NewPhi(int count, Node* input, Node* control);
+ Node* NewEffectPhi(int count, Node* input, Node* control);
+
+ // Helpers for merging control, effect or value dependencies.
+ Node* MergeControl(Node* control, Node* other);
+ Node* MergeEffect(Node* effect, Node* other_effect, Node* control);
+ Node* MergeValue(Node* value, Node* other_value, Node* control);
+
+ // The main node creation chokepoint. Adds context, frame state, effect,
+ // and control dependencies depending on the operator.
+ Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
+ bool incomplete);
+
+ // Helper to indicate a node exits the function body.
+ void UpdateControlDependencyToLeaveFunction(Node* exit);
+
+ Node** EnsureInputBufferSize(int size);
+
+ Node* ProcessCallArguments(const Operator* call_op, Node* callee,
+ interpreter::Register receiver, size_t arity);
+ Node* ProcessCallNewArguments(const Operator* call_new_op,
+ interpreter::Register callee,
+ interpreter::Register first_arg, size_t arity);
+ Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
+ interpreter::Register first_arg,
+ size_t arity);
+
+ void BuildCreateLiteral(const Operator* op,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateRegExpLiteral(
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateArrayLiteral(
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateObjectLiteral(
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateArguments(CreateArgumentsParameters::Type type,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildLoadGlobal(const interpreter::BytecodeArrayIterator& iterator,
+ TypeofMode typeof_mode);
+ void BuildStoreGlobal(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildNamedLoad(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildKeyedLoad(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildNamedStore(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildKeyedStore(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildLdaLookupSlot(TypeofMode typeof_mode,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildStaLookupSlot(LanguageMode language_mode,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCall(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildBinaryOp(const Operator* op,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCompareOp(const Operator* op,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildDelete(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCastOperator(const Operator* js_op,
+ const interpreter::BytecodeArrayIterator& iterator);
+
+ // Control flow plumbing.
+ void BuildJump(int source_offset, int target_offset);
+ void BuildJump();
+ void BuildConditionalJump(Node* condition);
+  void BuildJumpIfEqual(Node* comparand);
+  void BuildJumpIfToBooleanEqual(Node* boolean_comparand);
+
+ // Constructing merge and loop headers.
+ void MergeEnvironmentsOfBackwardBranches(int source_offset,
+ int target_offset);
+ void MergeEnvironmentsOfForwardBranches(int source_offset);
+ void BuildLoopHeaderForBackwardBranches(int source_offset);
+
+ // Attaches a frame state to |node| for the entry to the function.
+ void PrepareEntryFrameState(Node* node);
+
+ // Growth increment for the temporary buffer used to construct input lists to
+ // new nodes.
+ static const int kInputBufferSizeIncrement = 64;
+
+ // Field accessors
+ CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+ Zone* graph_zone() const { return graph()->zone(); }
+ CompilationInfo* info() const { return info_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ JSOperatorBuilder* javascript() const { return jsgraph_->javascript(); }
+ Zone* local_zone() const { return local_zone_; }
+ const Handle<BytecodeArray>& bytecode_array() const {
+ return bytecode_array_;
+ }
+ const FrameStateFunctionInfo* frame_state_function_info() const {
+ return frame_state_function_info_;
+ }
+
+ LanguageMode language_mode() const {
+ // TODO(mythria): Don't rely on parse information to get language mode.
+ return info()->language_mode();
+ }
+
+ const interpreter::BytecodeArrayIterator* bytecode_iterator() const {
+ return bytecode_iterator_;
+ }
+
+ void set_bytecode_iterator(
+ const interpreter::BytecodeArrayIterator* bytecode_iterator) {
+ bytecode_iterator_ = bytecode_iterator;
+ }
+
+ const BytecodeBranchAnalysis* branch_analysis() const {
+ return branch_analysis_;
+ }
+
+ void set_branch_analysis(const BytecodeBranchAnalysis* branch_analysis) {
+ branch_analysis_ = branch_analysis;
+ }
+
+#define DECLARE_VISIT_BYTECODE(name, ...) \
+ void Visit##name(const interpreter::BytecodeArrayIterator& iterator);
+ BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
+#undef DECLARE_VISIT_BYTECODE
+
+ Zone* local_zone_;
+ CompilationInfo* info_;
+ JSGraph* jsgraph_;
+ Handle<BytecodeArray> bytecode_array_;
+ const FrameStateFunctionInfo* frame_state_function_info_;
+ const interpreter::BytecodeArrayIterator* bytecode_iterator_;
+ const BytecodeBranchAnalysis* branch_analysis_;
+ Environment* environment_;
+
+
+ // Merge environments are snapshots of the environment at a particular
+ // bytecode offset to be merged into a later environment.
+ ZoneMap<int, Environment*> merge_environments_;
+
+ // Loop header environments are created for bytecode offsets that are
+ // known to be the target of back branches, i.e. loop headers.
+ ZoneMap<int, Environment*> loop_header_environments_;
+
+ // Temporary storage for building node input lists.
+ int input_buffer_size_;
+ Node** input_buffer_;
+
+ // Nodes representing values in the activation record.
+ SetOncePointer<Node> function_context_;
+ SetOncePointer<Node> function_closure_;
+ SetOncePointer<Node> new_target_;
+
+ // Optimization to cache the loaded feedback vector.
+ SetOncePointer<Node> feedback_vector_;
+
+ // Control nodes that exit the function body.
+ ZoneVector<Node*> exit_controls_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
+};
+
+
+class BytecodeGraphBuilder::Environment : public ZoneObject {
+ public:
+ Environment(BytecodeGraphBuilder* builder, int register_count,
+ int parameter_count, Node* control_dependency, Node* context);
+
+ int parameter_count() const { return parameter_count_; }
+ int register_count() const { return register_count_; }
+
+ Node* LookupAccumulator() const;
+ Node* LookupRegister(interpreter::Register the_register) const;
+
+ void ExchangeRegisters(interpreter::Register reg0,
+ interpreter::Register reg1);
+
+ void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
+ void BindRegister(interpreter::Register the_register, Node* node,
+ FrameStateBeforeAndAfter* states = nullptr);
+ void BindRegistersToProjections(interpreter::Register first_reg, Node* node,
+ FrameStateBeforeAndAfter* states = nullptr);
+ void RecordAfterState(Node* node, FrameStateBeforeAndAfter* states);
+
+ bool IsMarkedAsUnreachable() const;
+ void MarkAsUnreachable();
+
+ // Effect dependency tracked by this environment.
+ Node* GetEffectDependency() { return effect_dependency_; }
+ void UpdateEffectDependency(Node* dependency) {
+ effect_dependency_ = dependency;
+ }
+
+ // Preserve a checkpoint of the environment for the IR graph. Any
+ // further mutation of the environment will not affect checkpoints.
+ Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine);
+
+ // Returns true if the state values are up to date with the current
+ // environment.
+ bool StateValuesAreUpToDate(int output_poke_offset, int output_poke_count);
+
+ // Control dependency tracked by this environment.
+ Node* GetControlDependency() const { return control_dependency_; }
+ void UpdateControlDependency(Node* dependency) {
+ control_dependency_ = dependency;
+ }
+
+ Node* Context() const { return context_; }
+ void SetContext(Node* new_context) { context_ = new_context; }
+
+ Environment* CopyForConditional() const;
+ Environment* CopyForLoop();
+ void Merge(Environment* other);
+
+ private:
+ explicit Environment(const Environment* copy);
+ void PrepareForLoop();
+ bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
+ int output_poke_start, int output_poke_end);
+ bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
+ void UpdateStateValues(Node** state_values, int offset, int count);
+
+ int RegisterToValuesIndex(interpreter::Register the_register) const;
+
+ Zone* zone() const { return builder_->local_zone(); }
+ Graph* graph() const { return builder_->graph(); }
+ CommonOperatorBuilder* common() const { return builder_->common(); }
+ BytecodeGraphBuilder* builder() const { return builder_; }
+ const NodeVector* values() const { return &values_; }
+ NodeVector* values() { return &values_; }
+ int register_base() const { return register_base_; }
+ int accumulator_base() const { return accumulator_base_; }
+
+ BytecodeGraphBuilder* builder_;
+ int register_count_;
+ int parameter_count_;
+ Node* context_;
+ Node* control_dependency_;
+ Node* effect_dependency_;
+ NodeVector values_;
+ Node* parameters_state_values_;
+ Node* registers_state_values_;
+ Node* accumulator_state_values_;
+ int register_base_;
+ int accumulator_base_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
diff --git a/src/compiler/c-linkage.cc b/src/compiler/c-linkage.cc
new file mode 100644
index 0000000..44e0bf1
--- /dev/null
+++ b/src/compiler/c-linkage.cc
@@ -0,0 +1,229 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+
+#include "src/compiler/linkage.h"
+
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+LinkageLocation regloc(Register reg) {
+ return LinkageLocation::ForRegister(reg.code());
+}
+
+
+// Platform-specific configuration for the C calling convention.
+#if V8_TARGET_ARCH_IA32
+// ===========================================================================
+// == ia32 ===================================================================
+// ===========================================================================
+#define CALLEE_SAVE_REGISTERS esi.bit() | edi.bit() | ebx.bit()
+
+#elif V8_TARGET_ARCH_X64
+// ===========================================================================
+// == x64 ====================================================================
+// ===========================================================================
+
+#ifdef _WIN64
+// == x64 windows ============================================================
+#define STACK_SHADOW_WORDS 4
+#define PARAM_REGISTERS rcx, rdx, r8, r9
+#define CALLEE_SAVE_REGISTERS \
+ rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() | r14.bit() | \
+ r15.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ (1 << xmm6.code()) | (1 << xmm7.code()) | (1 << xmm8.code()) | \
+ (1 << xmm9.code()) | (1 << xmm10.code()) | (1 << xmm11.code()) | \
+ (1 << xmm12.code()) | (1 << xmm13.code()) | (1 << xmm14.code()) | \
+ (1 << xmm15.code())
+#else
+// == x64 other ==============================================================
+#define PARAM_REGISTERS rdi, rsi, rdx, rcx, r8, r9
+#define CALLEE_SAVE_REGISTERS \
+ rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit()
+#endif
+
+#elif V8_TARGET_ARCH_X87
+// ===========================================================================
+// == x87 ====================================================================
+// ===========================================================================
+#define CALLEE_SAVE_REGISTERS esi.bit() | edi.bit() | ebx.bit()
+
+#elif V8_TARGET_ARCH_ARM
+// ===========================================================================
+// == arm ====================================================================
+// ===========================================================================
+#define PARAM_REGISTERS r0, r1, r2, r3
+#define CALLEE_SAVE_REGISTERS \
+ r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ (1 << d8.code()) | (1 << d9.code()) | (1 << d10.code()) | \
+ (1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) | \
+ (1 << d14.code()) | (1 << d15.code())
+
+
+#elif V8_TARGET_ARCH_ARM64
+// ===========================================================================
+// == arm64 ==================================================================
+// ===========================================================================
+#define PARAM_REGISTERS x0, x1, x2, x3, x4, x5, x6, x7
+#define CALLEE_SAVE_REGISTERS \
+ (1 << x19.code()) | (1 << x20.code()) | (1 << x21.code()) | \
+ (1 << x22.code()) | (1 << x23.code()) | (1 << x24.code()) | \
+ (1 << x25.code()) | (1 << x26.code()) | (1 << x27.code()) | \
+ (1 << x28.code()) | (1 << x29.code()) | (1 << x30.code())
+
+
+#define CALLEE_SAVE_FP_REGISTERS \
+ (1 << d8.code()) | (1 << d9.code()) | (1 << d10.code()) | \
+ (1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) | \
+ (1 << d14.code()) | (1 << d15.code())
+
+#elif V8_TARGET_ARCH_MIPS
+// ===========================================================================
+// == mips ===================================================================
+// ===========================================================================
+#define PARAM_REGISTERS a0, a1, a2, a3
+#define CALLEE_SAVE_REGISTERS \
+ s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
+ s7.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
+
+#elif V8_TARGET_ARCH_MIPS64
+// ===========================================================================
+// == mips64 =================================================================
+// ===========================================================================
+#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
+#define CALLEE_SAVE_REGISTERS \
+ s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
+ s7.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
+
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+// ===========================================================================
+// == ppc & ppc64 ============================================================
+// ===========================================================================
+#define PARAM_REGISTERS r3, r4, r5, r6, r7, r8, r9, r10
+#define CALLEE_SAVE_REGISTERS \
+ r14.bit() | r15.bit() | r16.bit() | r17.bit() | r18.bit() | r19.bit() | \
+ r20.bit() | r21.bit() | r22.bit() | r23.bit() | r24.bit() | r25.bit() | \
+ r26.bit() | r27.bit() | r28.bit() | r29.bit() | r30.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ d14.bit() | d15.bit() | d16.bit() | d17.bit() | d18.bit() | d19.bit() | \
+ d20.bit() | d21.bit() | d22.bit() | d23.bit() | d24.bit() | d25.bit() | \
+ d26.bit() | d27.bit() | d28.bit() | d29.bit() | d30.bit() | d31.bit()
+
+#else
+// ===========================================================================
+// == unknown ================================================================
+// ===========================================================================
+#define UNSUPPORTED_C_LINKAGE 1
+#endif
+} // namespace
+
+
+// General code uses the above configuration data.
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+ Zone* zone, const MachineSignature* msig) {
+ LocationSignature::Builder locations(zone, msig->return_count(),
+ msig->parameter_count());
+#if 0 // TODO(titzer): instruction selector tests break here.
+ // Check the types of the signature.
+ // Currently no floating point parameters or returns are allowed because
+ // on x87 and ia32, the FP top of stack is involved.
+
+ for (size_t i = 0; i < msig->return_count(); i++) {
+ MachineType type = RepresentationOf(msig->GetReturn(i));
+ CHECK(type != kRepFloat32 && type != kRepFloat64);
+ }
+ for (size_t i = 0; i < msig->parameter_count(); i++) {
+ MachineType type = RepresentationOf(msig->GetParam(i));
+ CHECK(type != kRepFloat32 && type != kRepFloat64);
+ }
+#endif
+
+#ifdef UNSUPPORTED_C_LINKAGE
+ // This method should not be called on unknown architectures.
+ V8_Fatal(__FILE__, __LINE__,
+ "requested C call descriptor on unsupported architecture");
+ return nullptr;
+#endif
+
+ // Add return location(s).
+ CHECK(locations.return_count_ <= 2);
+
+ if (locations.return_count_ > 0) {
+ locations.AddReturn(regloc(kReturnRegister0));
+ }
+ if (locations.return_count_ > 1) {
+ locations.AddReturn(regloc(kReturnRegister1));
+ }
+
+ const int parameter_count = static_cast<int>(msig->parameter_count());
+
+#ifdef PARAM_REGISTERS
+ static const Register kParamRegisters[] = {PARAM_REGISTERS};
+ static const int kParamRegisterCount =
+ static_cast<int>(arraysize(kParamRegisters));
+#else
+ static const Register* kParamRegisters = nullptr;
+ static const int kParamRegisterCount = 0;
+#endif
+
+#ifdef STACK_SHADOW_WORDS
+ int stack_offset = STACK_SHADOW_WORDS;
+#else
+ int stack_offset = 0;
+#endif
+ // Add register and/or stack parameter(s).
+ for (int i = 0; i < parameter_count; i++) {
+ if (i < kParamRegisterCount) {
+ locations.AddParam(regloc(kParamRegisters[i]));
+ } else {
+ locations.AddParam(
+ LinkageLocation::ForCallerFrameSlot(-1 - stack_offset));
+ stack_offset++;
+ }
+ }
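+ // Worked example (illustrative): on x64 Linux, PARAM_REGISTERS is
+ // {rdi, rsi, rdx, rcx, r8, r9} and there is no stack shadow, so an
+ // 8-parameter call places p0..p5 in those registers and p6, p7 in caller
+ // frame slots -1 and -2. On x64 Windows, STACK_SHADOW_WORDS is 4, so the
+ // first stack parameter already lands in slot -5.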
+
+#ifdef CALLEE_SAVE_REGISTERS
+ const RegList kCalleeSaveRegisters = CALLEE_SAVE_REGISTERS;
+#else
+ const RegList kCalleeSaveRegisters = 0;
+#endif
+
+#ifdef CALLEE_SAVE_FP_REGISTERS
+ const RegList kCalleeSaveFPRegisters = CALLEE_SAVE_FP_REGISTERS;
+#else
+ const RegList kCalleeSaveFPRegisters = 0;
+#endif
+
+ // The target for C calls is always an address (i.e. machine pointer).
+ MachineType target_type = MachineType::Pointer();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallAddress, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ msig, // machine_sig
+ locations.Build(), // location_sig
+ 0, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ CallDescriptor::kNoFlags, // flags
+ "c-call");
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc
index 7ddc751..f791db1 100644
--- a/src/compiler/change-lowering.cc
+++ b/src/compiler/change-lowering.cc
@@ -4,12 +4,14 @@
#include "src/compiler/change-lowering.h"
+#include "src/address-map.h"
#include "src/code-factory.h"
-#include "src/compiler/diamond.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
@@ -37,6 +39,16 @@
return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned);
case IrOpcode::kChangeUint32ToTagged:
return ChangeUint32ToTagged(node->InputAt(0), control);
+ case IrOpcode::kLoadField:
+ return LoadField(node);
+ case IrOpcode::kStoreField:
+ return StoreField(node);
+ case IrOpcode::kLoadElement:
+ return LoadElement(node);
+ case IrOpcode::kStoreElement:
+ return StoreElement(node);
+ case IrOpcode::kAllocate:
+ return Allocate(node);
default:
return NoChange();
}
@@ -46,25 +58,17 @@
Node* ChangeLowering::HeapNumberValueIndexConstant() {
- STATIC_ASSERT(HeapNumber::kValueOffset % kPointerSize == 0);
- const int heap_number_value_offset =
- ((HeapNumber::kValueOffset / kPointerSize) * (machine()->Is64() ? 8 : 4));
- return jsgraph()->IntPtrConstant(heap_number_value_offset - kHeapObjectTag);
+ return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}
Node* ChangeLowering::SmiMaxValueConstant() {
- const int smi_value_size = machine()->Is32() ? SmiTagging<4>::SmiValueSize()
- : SmiTagging<8>::SmiValueSize();
- return jsgraph()->Int32Constant(
- -(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1));
+ return jsgraph()->Int32Constant(Smi::kMaxValue);
}
Node* ChangeLowering::SmiShiftBitsConstant() {
- const int smi_shift_size = machine()->Is32() ? SmiTagging<4>::SmiShiftSize()
- : SmiTagging<8>::SmiShiftSize();
- return jsgraph()->IntPtrConstant(smi_shift_size + kSmiTagSize);
+ return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
@@ -72,17 +76,22 @@
// The AllocateHeapNumberStub does not use the context, so we can safely pass
// in Smi zero here.
Callable callable = CodeFactory::AllocateHeapNumber(isolate());
- CallDescriptor* descriptor = linkage()->GetStubCallDescriptor(
- callable.descriptor(), 0, CallDescriptor::kNoFlags);
Node* target = jsgraph()->HeapConstant(callable.code());
Node* context = jsgraph()->NoContextConstant();
- Node* effect = graph()->NewNode(common()->ValueEffect(1), value);
- Node* heap_number = graph()->NewNode(common()->Call(descriptor), target,
- context, effect, control);
+ Node* effect = graph()->NewNode(common()->BeginRegion(), graph()->start());
+ if (!allocate_heap_number_operator_.is_set()) {
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow);
+ allocate_heap_number_operator_.set(common()->Call(descriptor));
+ }
+ Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
+ target, context, effect, control);
Node* store = graph()->NewNode(
- machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
+ machine()->Store(StoreRepresentation(MachineRepresentation::kFloat64,
+ kNoWriteBarrier)),
heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
- return graph()->NewNode(common()->Finish(1), heap_number, store);
+ return graph()->NewNode(common()->FinishRegion(), heap_number, store);
}
@@ -91,6 +100,14 @@
}
+Node* ChangeLowering::ChangeInt32ToSmi(Node* value) {
+ if (machine()->Is64()) {
+ value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
+ }
+ return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+}
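+// Example (illustrative): with the usual kSmiTagSize of 1 and a kSmiShiftSize
+// of 0 on 32-bit and 31 on 64-bit targets, ChangeInt32ToSmi(5) computes
+// 5 << 1 == 0xA on 32-bit machines and 5 << 32 on 64-bit machines, i.e. the
+// payload ends up in the upper 32 bits with the low bits holding the tag.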
+
+
Node* ChangeLowering::ChangeSmiToFloat64(Node* value) {
return ChangeInt32ToFloat64(ChangeSmiToInt32(value));
}
@@ -119,7 +136,7 @@
Node* ChangeLowering::LoadHeapNumberValue(Node* value, Node* control) {
- return graph()->NewNode(machine()->Load(kMachFloat64), value,
+ return graph()->NewNode(machine()->Load(MachineType::Float64()), value,
HeapNumberValueIndexConstant(), graph()->start(),
control);
}
@@ -133,64 +150,151 @@
}
-Node* ChangeLowering::Uint32LessThanOrEqual(Node* lhs, Node* rhs) {
- return graph()->NewNode(machine()->Uint32LessThanOrEqual(), lhs, rhs);
-}
-
-
-Reduction ChangeLowering::ChangeBitToBool(Node* val, Node* control) {
- MachineType const type = static_cast<MachineType>(kTypeBool | kRepTagged);
- return Replace(graph()->NewNode(common()->Select(type), val,
- jsgraph()->TrueConstant(),
- jsgraph()->FalseConstant()));
-}
-
-
-Reduction ChangeLowering::ChangeBoolToBit(Node* val) {
+Reduction ChangeLowering::ChangeBitToBool(Node* value, Node* control) {
return Replace(
- graph()->NewNode(machine()->WordEqual(), val, jsgraph()->TrueConstant()));
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged), value,
+ jsgraph()->TrueConstant(), jsgraph()->FalseConstant()));
}
-Reduction ChangeLowering::ChangeFloat64ToTagged(Node* val, Node* control) {
- return Replace(AllocateHeapNumberWithValue(val, control));
+Reduction ChangeLowering::ChangeBoolToBit(Node* value) {
+ return Replace(graph()->NewNode(machine()->WordEqual(), value,
+ jsgraph()->TrueConstant()));
+}
+
+
+Reduction ChangeLowering::ChangeFloat64ToTagged(Node* value, Node* control) {
+ Type* const value_type = NodeProperties::GetType(value);
+ Node* const value32 = graph()->NewNode(
+ machine()->TruncateFloat64ToInt32(TruncationMode::kRoundToZero), value);
+ // TODO(bmeurer): This fast case must be disabled until we kill the asm.js
+ // support in the generic JavaScript pipeline, because LoadBuffer is lying
+ // about its result.
+ // if (value_type->Is(Type::Signed32())) {
+ // return ChangeInt32ToTagged(value32, control);
+ // }
+ Node* check_same = graph()->NewNode(
+ machine()->Float64Equal(), value,
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
+ Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);
+
+ Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
+ Node* vsmi;
+ Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
+ Node* vbox;
+
+ // We only need to check for -0 if the {value} can potentially contain -0.
+ if (value_type->Maybe(Type::MinusZero())) {
+ Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
+ jsgraph()->Int32Constant(0));
+ Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_zero, if_smi);
+
+ Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+ Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Node* check_negative = graph()->NewNode(
+ machine()->Int32LessThan(),
+ graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+ jsgraph()->Int32Constant(0));
+ Node* branch_negative = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), check_negative, if_zero);
+
+ Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
+ Node* if_notnegative =
+ graph()->NewNode(common()->IfFalse(), branch_negative);
+
+ // We need to create a box for negative 0.
+ if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
+ if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
+ }
+
+ // On 64-bit machines we can just wrap the 32-bit integer in a Smi; on
+ // 32-bit machines we need to deal with potential overflow and fall back
+ // to boxing.
+ if (machine()->Is64() || value_type->Is(Type::SignedSmall())) {
+ vsmi = ChangeInt32ToSmi(value32);
+ } else {
+ Node* smi_tag =
+ graph()->NewNode(machine()->Int32AddWithOverflow(), value32, value32);
+
+ Node* check_ovf = graph()->NewNode(common()->Projection(1), smi_tag);
+ Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_ovf, if_smi);
+
+ Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
+ if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
+
+ if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
+ vsmi = graph()->NewNode(common()->Projection(0), smi_tag);
+ }
+
+ // Allocate the box for the {value}.
+ vbox = AllocateHeapNumberWithValue(value, if_box);
+
+ control = graph()->NewNode(common()->Merge(2), if_smi, if_box);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vsmi, vbox, control);
+ return Replace(value);
}
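+// Worked example (illustrative): for value == -0.0, value32 is 0 and the
+// Float64Equal check above passes, since IEEE comparison treats -0.0 == 0.0.
+// Only the sign bit distinguishes the two (-0.0 is 0x8000000000000000), so
+// the high word 0x80000000 tests negative as an int32 and forces a heap
+// number box instead of the Smi 0.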
Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
- if (machine()->Is64()) {
- return Replace(graph()->NewNode(
- machine()->Word64Shl(),
- graph()->NewNode(machine()->ChangeInt32ToInt64(), value),
- SmiShiftBitsConstant()));
- } else if (NodeProperties::GetBounds(value).upper->Is(Type::SignedSmall())) {
- return Replace(
- graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant()));
+ if (machine()->Is64() ||
+ NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
+ return Replace(ChangeInt32ToSmi(value));
}
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value);
- Node* ovf = graph()->NewNode(common()->Projection(1), add);
- Diamond d(graph(), common(), ovf, BranchHint::kFalse);
- d.Chain(control);
- return Replace(
- d.Phi(kMachAnyTagged,
- AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), d.if_true),
- graph()->NewNode(common()->Projection(0), add)));
+ Node* ovf = graph()->NewNode(common()->Projection(1), add);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue =
+ AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(common()->Projection(0), add);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, merge);
+
+ return Replace(phi);
}
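+// Note (illustrative): on 32-bit targets Int32AddWithOverflow(value, value)
+// doubles as the Smi tag operation, since value + value == value << 1, while
+// simultaneously detecting values outside the Smi range: 0x3FFFFFFF tags to
+// 0x7FFFFFFE, whereas 0x40000000 (2^30) overflows the signed add and takes
+// the heap number path.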
Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
Signedness signedness) {
- const MachineType type = (signedness == kSigned) ? kMachInt32 : kMachUint32;
+ if (NodeProperties::GetType(value)->Is(Type::TaggedSigned())) {
+ return Replace(ChangeSmiToInt32(value));
+ }
+
const Operator* op = (signedness == kSigned)
? machine()->ChangeFloat64ToInt32()
: machine()->ChangeFloat64ToUint32();
- Diamond d(graph(), common(), TestNotSmi(value), BranchHint::kFalse);
- d.Chain(control);
- return Replace(
- d.Phi(type, graph()->NewNode(op, LoadHeapNumberValue(value, d.if_true)),
- ChangeSmiToInt32(value)));
+
+ if (NodeProperties::GetType(value)->Is(Type::TaggedPointer())) {
+ return Replace(graph()->NewNode(op, LoadHeapNumberValue(value, control)));
+ }
+
+ Node* check = TestNotSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = graph()->NewNode(op, LoadHeapNumberValue(value, if_true));
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = ChangeSmiToInt32(value);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue, vfalse, merge);
+
+ return Replace(phi);
}
@@ -200,6 +304,7 @@
if (value->opcode() != opcode) return false;
bool first = true;
for (Edge const edge : value->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) continue;
if (NodeProperties::IsEffectEdge(edge)) continue;
DCHECK(NodeProperties::IsValueEdge(edge));
if (!first) return false;
@@ -220,46 +325,261 @@
// else LoadHeapNumberValue(y)
Node* const object = NodeProperties::GetValueInput(value, 0);
Node* const context = NodeProperties::GetContextInput(value);
+ Node* const frame_state = NodeProperties::GetFrameStateInput(value, 0);
Node* const effect = NodeProperties::GetEffectInput(value);
Node* const control = NodeProperties::GetControlInput(value);
- Diamond d1(graph(), common(), TestNotSmi(object), BranchHint::kFalse);
- d1.Chain(control);
+ const Operator* merge_op = common()->Merge(2);
+ const Operator* ephi_op = common()->EffectPhi(2);
+ const Operator* phi_op = common()->Phi(MachineRepresentation::kFloat64, 2);
- Node* number =
- graph()->NewNode(value->op(), object, context, effect, d1.if_true);
- Diamond d2(graph(), common(), TestNotSmi(number));
- d2.Nest(d1, true);
- Node* phi2 = d2.Phi(kMachFloat64, LoadHeapNumberValue(number, d2.if_true),
- ChangeSmiToFloat64(number));
+ Node* check1 = TestNotSmi(object);
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
- Node* phi1 = d1.Phi(kMachFloat64, phi2, ChangeSmiToFloat64(object));
- Node* ephi1 = d1.EffectPhi(number, effect);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = graph()->NewNode(value->op(), object, context, frame_state,
+ effect, if_true1);
+ Node* etrue1 = vtrue1;
- for (Edge edge : value->use_edges()) {
- if (NodeProperties::IsEffectEdge(edge)) {
- edge.UpdateTo(ephi1);
+ Node* check2 = TestNotSmi(vtrue1);
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_true1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* vtrue2 = LoadHeapNumberValue(vtrue1, if_true2);
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* vfalse2 = ChangeSmiToFloat64(vtrue1);
+
+ if_true1 = graph()->NewNode(merge_op, if_true2, if_false2);
+ vtrue1 = graph()->NewNode(phi_op, vtrue2, vfalse2, if_true1);
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1 = ChangeSmiToFloat64(object);
+ Node* efalse1 = effect;
+
+ Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
+ Node* ephi1 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
+ Node* phi1 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
+
+ // Wire the new diamond into the graph; {JSToNumber} can still throw.
+ NodeProperties::ReplaceUses(value, phi1, ephi1, etrue1, etrue1);
+
+ // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
+ // the node and places it inside the diamond. Come up with a helper method!
+ for (Node* use : etrue1->uses()) {
+ if (use->opcode() == IrOpcode::kIfSuccess) {
+ use->ReplaceUses(merge1);
+ NodeProperties::ReplaceControlInput(branch2, use);
}
}
+
return Replace(phi1);
}
- Diamond d(graph(), common(), TestNotSmi(value), BranchHint::kFalse);
- d.Chain(control);
- Node* load = LoadHeapNumberValue(value, d.if_true);
- Node* number = ChangeSmiToFloat64(value);
- return Replace(d.Phi(kMachFloat64, load, number));
+ Node* check = TestNotSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = LoadHeapNumberValue(value, if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = ChangeSmiToFloat64(value);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);
+
+ return Replace(phi);
}
Reduction ChangeLowering::ChangeUint32ToTagged(Node* value, Node* control) {
- Diamond d(graph(), common(),
- Uint32LessThanOrEqual(value, SmiMaxValueConstant()),
- BranchHint::kTrue);
- d.Chain(control);
- return Replace(d.Phi(
- kMachAnyTagged, ChangeUint32ToSmi(value),
- AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), d.if_false)));
+ if (NodeProperties::GetType(value)->Is(Type::UnsignedSmall())) {
+ return Replace(ChangeUint32ToSmi(value));
+ }
+
+ Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
+ SmiMaxValueConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = ChangeUint32ToSmi(value);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse =
+ AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), if_false);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, merge);
+
+ return Replace(phi);
+}
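+// Example (illustrative, assuming the usual Smi ranges of 2^30 - 1 on 32-bit
+// and 2^31 - 1 on 64-bit targets): a uint32 value of 7 tags directly, while
+// 0x80000000 exceeds Smi::kMaxValue on either target and allocates a heap
+// number via the false branch above.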
+
+
+namespace {
+
+WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
+ MachineRepresentation representation,
+ Type* field_type, Type* input_type) {
+ if (field_type->Is(Type::TaggedSigned()) ||
+ input_type->Is(Type::TaggedSigned())) {
+ // Write barriers are only for writes of heap objects.
+ return kNoWriteBarrier;
+ }
+ if (input_type->Is(Type::BooleanOrNullOrUndefined())) {
+ // Write barriers are not necessary when storing true, false, null or
+ // undefined, because these special oddballs are always in the root set.
+ return kNoWriteBarrier;
+ }
+ if (base_is_tagged == kTaggedBase &&
+ representation == MachineRepresentation::kTagged) {
+ if (input_type->IsConstant() &&
+ input_type->AsConstant()->Value()->IsHeapObject()) {
+ Handle<HeapObject> input =
+ Handle<HeapObject>::cast(input_type->AsConstant()->Value());
+ if (input->IsMap()) {
+ // Write barriers for storing maps are cheaper.
+ return kMapWriteBarrier;
+ }
+ Isolate* const isolate = input->GetIsolate();
+ RootIndexMap root_index_map(isolate);
+ int root_index = root_index_map.Lookup(*input);
+ if (root_index != RootIndexMap::kInvalidRootIndex &&
+ isolate->heap()->RootIsImmortalImmovable(root_index)) {
+ // Write barriers are unnecessary for immortal immovable roots.
+ return kNoWriteBarrier;
+ }
+ }
+ if (field_type->Is(Type::TaggedPointer()) ||
+ input_type->Is(Type::TaggedPointer())) {
+ // Write barriers for heap objects don't need a Smi check.
+ return kPointerWriteBarrier;
+ }
+ // Write barriers are only for writes into heap objects (i.e. tagged base).
+ return kFullWriteBarrier;
+ }
+ return kNoWriteBarrier;
+}
+
+
+WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
+ MachineRepresentation representation,
+ int field_offset, Type* field_type,
+ Type* input_type) {
+ if (base_is_tagged == kTaggedBase && field_offset == HeapObject::kMapOffset) {
+ // Write barriers for storing maps are cheaper.
+ return kMapWriteBarrier;
+ }
+ return ComputeWriteBarrierKind(base_is_tagged, representation, field_type,
+ input_type);
+}
+
+} // namespace
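+// Examples of the decision above (illustrative): storing a value statically
+// known to be a Smi needs no barrier; a store to HeapObject::kMapOffset uses
+// the cheaper kMapWriteBarrier; a value known to be a TaggedPointer skips the
+// Smi check via kPointerWriteBarrier; and an arbitrary tagged store into a
+// tagged base falls back to kFullWriteBarrier.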
+
+
+Reduction ChangeLowering::LoadField(Node* node) {
+ const FieldAccess& access = FieldAccessOf(node->op());
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ return Changed(node);
+}
+
+
+Reduction ChangeLowering::StoreField(Node* node) {
+ const FieldAccess& access = FieldAccessOf(node->op());
+ Type* type = NodeProperties::GetType(node->InputAt(1));
+ WriteBarrierKind kind = ComputeWriteBarrierKind(
+ access.base_is_tagged, access.machine_type.representation(),
+ access.offset, access.type, type);
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(node,
+ machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), kind)));
+ return Changed(node);
+}
+
+
+Node* ChangeLowering::ComputeIndex(const ElementAccess& access,
+ Node* const key) {
+ Node* index = key;
+ const int element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ if (element_size_shift) {
+ index = graph()->NewNode(machine()->Word32Shl(), index,
+ jsgraph()->Int32Constant(element_size_shift));
+ }
+ const int fixed_offset = access.header_size - access.tag();
+ if (fixed_offset) {
+ index = graph()->NewNode(machine()->Int32Add(), index,
+ jsgraph()->Int32Constant(fixed_offset));
+ }
+ if (machine()->Is64()) {
+ // TODO(turbofan): This is probably only correct for typed arrays, and only
+ // if the typed arrays are at most 2GiB in size, which happens to match
+ // exactly our current situation.
+ index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
+ }
+ return index;
+}
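+// Worked example (illustrative): for a tagged element on a 64-bit target the
+// representation is 8 bytes wide (element_size_shift == 3), so with header
+// size H and a tagged base the byte index for {key} computes as
+// (key << 3) + H - kHeapObjectTag, zero-extended to 64 bits by the final
+// ChangeUint32ToUint64.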
+
+
+Reduction ChangeLowering::LoadElement(Node* node) {
+ const ElementAccess& access = ElementAccessOf(node->op());
+ node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ return Changed(node);
+}
+
+
+Reduction ChangeLowering::StoreElement(Node* node) {
+ const ElementAccess& access = ElementAccessOf(node->op());
+ Type* type = NodeProperties::GetType(node->InputAt(2));
+ node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(),
+ ComputeWriteBarrierKind(access.base_is_tagged,
+ access.machine_type.representation(),
+ access.type, type))));
+ return Changed(node);
+}
+
+
+Reduction ChangeLowering::Allocate(Node* node) {
+ PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
+ if (pretenure == NOT_TENURED) {
+ Callable callable = CodeFactory::AllocateInNewSpace(isolate());
+ Node* target = jsgraph()->HeapConstant(callable.code());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow);
+ const Operator* op = common()->Call(descriptor);
+ node->InsertInput(graph()->zone(), 0, target);
+ node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant());
+ NodeProperties::ChangeOp(node, op);
+ } else {
+ DCHECK_EQ(TENURED, pretenure);
+ AllocationSpace space = OLD_SPACE;
+ Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
+ Operator::Properties props = node->op()->properties();
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph()->zone(), f, 2, props, CallDescriptor::kNeedsFrameState);
+ ExternalReference ref(f, jsgraph()->isolate());
+ int32_t flags = AllocateTargetSpace::encode(space);
+ node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
+ node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ }
+ return Changed(node);
}
diff --git a/src/compiler/change-lowering.h b/src/compiler/change-lowering.h
index 773fd08..6d60776 100644
--- a/src/compiler/change-lowering.h
+++ b/src/compiler/change-lowering.h
@@ -13,17 +13,18 @@
// Forward declarations.
class CommonOperatorBuilder;
+struct ElementAccess;
class JSGraph;
class Linkage;
class MachineOperatorBuilder;
+class Operator;
-class ChangeLowering FINAL : public Reducer {
+class ChangeLowering final : public Reducer {
public:
- ChangeLowering(JSGraph* jsgraph, Linkage* linkage)
- : jsgraph_(jsgraph), linkage_(linkage) {}
- ~ChangeLowering() FINAL;
+ explicit ChangeLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
+ ~ChangeLowering() final;
- Reduction Reduce(Node* node) FINAL;
+ Reduction Reduce(Node* node) final;
private:
Node* HeapNumberValueIndexConstant();
@@ -32,13 +33,13 @@
Node* AllocateHeapNumberWithValue(Node* value, Node* control);
Node* ChangeInt32ToFloat64(Node* value);
+ Node* ChangeInt32ToSmi(Node* value);
Node* ChangeSmiToFloat64(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ChangeUint32ToFloat64(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* LoadHeapNumberValue(Node* value, Node* control);
Node* TestNotSmi(Node* value);
- Node* Uint32LessThanOrEqual(Node* lhs, Node* rhs);
Reduction ChangeBitToBool(Node* value, Node* control);
Reduction ChangeBoolToBit(Node* value);
@@ -49,15 +50,21 @@
Signedness signedness);
Reduction ChangeUint32ToTagged(Node* value, Node* control);
+ Reduction LoadField(Node* node);
+ Reduction StoreField(Node* node);
+ Reduction LoadElement(Node* node);
+ Reduction StoreElement(Node* node);
+ Reduction Allocate(Node* node);
+
+ Node* ComputeIndex(const ElementAccess& access, Node* const key);
Graph* graph() const;
Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
- Linkage* linkage() const { return linkage_; }
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
- JSGraph* jsgraph_;
- Linkage* linkage_;
+ JSGraph* const jsgraph_;
+ SetOncePointer<const Operator> allocate_heap_number_operator_;
};
} // namespace compiler
diff --git a/src/compiler/coalesced-live-ranges.cc b/src/compiler/coalesced-live-ranges.cc
new file mode 100644
index 0000000..4ac3e21
--- /dev/null
+++ b/src/compiler/coalesced-live-ranges.cc
@@ -0,0 +1,143 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/coalesced-live-ranges.h"
+#include "src/compiler/greedy-allocator.h"
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+LiveRangeConflictIterator::LiveRangeConflictIterator(const LiveRange* range,
+ IntervalStore* storage)
+ : query_(range->first_interval()),
+ pos_(storage->end()),
+ intervals_(storage) {
+ MovePosAndQueryToFirstConflict();
+}
+
+
+LiveRange* LiveRangeConflictIterator::Current() const {
+ if (IsFinished()) return nullptr;
+ return pos_->range_;
+}
+
+
+void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() {
+ DCHECK_NOT_NULL(query_);
+ auto end = intervals_->end();
+ LifetimePosition q_start = query_->start();
+ LifetimePosition q_end = query_->end();
+
+ if (intervals_->empty() || intervals_->rbegin()->end_ <= q_start ||
+ intervals_->begin()->start_ >= q_end) {
+ pos_ = end;
+ return;
+ }
+
+ pos_ = intervals_->upper_bound(AsAllocatedInterval(q_start));
+ // pos_ is either at the end (no interval starts strictly after q_start)
+ // or at some interval with that property. In either case, the allocated
+ // interval just before it may still intersect our query: either because
+ // it starts before the query's start but ends after it, or because it
+ // starts exactly at the query start. So unless we are right at the
+ // beginning of the storage - meaning the first allocated interval also
+ // starts after this query's start - look at the interval behind.
+ if (pos_ != intervals_->begin()) {
+ --pos_;
+ if (!QueryIntersectsAllocatedInterval()) {
+ // The interval behind wasn't intersecting, so move back.
+ ++pos_;
+ }
+ }
+ if (pos_ == end || !QueryIntersectsAllocatedInterval()) {
+ pos_ = end;
+ }
+}
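+// Worked example (illustrative): with allocated intervals [0, 5) and [5, 10)
+// and a query starting at 3, upper_bound finds [5, 10) (the first start
+// strictly greater than 3); stepping back one position reaches [0, 5), which
+// does intersect the query since 3 < 5, so pos_ stays there. A query starting
+// at or after 10 never gets here: it takes the early out at the top.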
+
+
+void LiveRangeConflictIterator::MovePosAndQueryToFirstConflict() {
+ auto end = intervals_->end();
+ for (; query_ != nullptr; query_ = query_->next()) {
+ MovePosToFirstConflictForQuery();
+ if (pos_ != end) {
+ DCHECK(QueryIntersectsAllocatedInterval());
+ return;
+ }
+ }
+
+ Invalidate();
+}
+
+
+void LiveRangeConflictIterator::IncrementPosAndSkipOverRepetitions() {
+ auto end = intervals_->end();
+ DCHECK(pos_ != end);
+ LiveRange* current_conflict = Current();
+ while (pos_ != end && pos_->range_ == current_conflict) {
+ ++pos_;
+ }
+}
+
+
+LiveRange* LiveRangeConflictIterator::InternalGetNext(bool clean_behind) {
+ if (IsFinished()) return nullptr;
+
+ LiveRange* to_clear = Current();
+ IncrementPosAndSkipOverRepetitions();
+ // At this point, pos_ is either at the end, or on an interval that doesn't
+ // correspond to the same range as to_clear. This interval may not even be
+ // a conflict.
+ if (clean_behind) {
+ // Since we parked pos_ on an iterator that won't be affected by removal,
+ // we can safely delete to_clear's intervals.
+ for (auto interval = to_clear->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ AllocatedInterval erase_key(interval->start(), interval->end(), nullptr);
+ intervals_->erase(erase_key);
+ }
+ }
+ // We may have parked pos_ at the end, or on a non-conflict. In that case,
+ // move to the next query and reinitialize pos and query. This may invalidate
+ // the iterator, if no more conflicts are available.
+ if (!QueryIntersectsAllocatedInterval()) {
+ query_ = query_->next();
+ MovePosAndQueryToFirstConflict();
+ }
+ return Current();
+}
+
+
+LiveRangeConflictIterator CoalescedLiveRanges::GetConflicts(
+ const LiveRange* range) {
+ return LiveRangeConflictIterator(range, &intervals());
+}
+
+
+void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
+ for (auto interval = range->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ AllocatedInterval to_insert(interval->start(), interval->end(), range);
+ intervals().insert(to_insert);
+ }
+}
+
+
+bool CoalescedLiveRanges::VerifyAllocationsAreValidForTesting() const {
+ LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
+ for (auto i : intervals_) {
+ if (i.start_ < last_end) {
+ return false;
+ }
+ last_end = i.end_;
+ }
+ return true;
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/coalesced-live-ranges.h b/src/compiler/coalesced-live-ranges.h
new file mode 100644
index 0000000..54bbce2
--- /dev/null
+++ b/src/compiler/coalesced-live-ranges.h
@@ -0,0 +1,158 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COALESCED_LIVE_RANGES_H_
+#define V8_COALESCED_LIVE_RANGES_H_
+
+#include "src/compiler/register-allocator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Implementation detail for CoalescedLiveRanges.
+struct AllocatedInterval {
+ AllocatedInterval(LifetimePosition start, LifetimePosition end,
+ LiveRange* range)
+ : start_(start), end_(end), range_(range) {}
+
+ LifetimePosition start_;
+ LifetimePosition end_;
+ LiveRange* range_;
+ bool operator<(const AllocatedInterval& other) const {
+ return start_ < other.start_;
+ }
+ bool operator>(const AllocatedInterval& other) const {
+ return start_ > other.start_;
+ }
+};
+typedef ZoneSet<AllocatedInterval> IntervalStore;
+
+
+// An iterator over the conflicts of a live range, obtained from
+// CoalescedLiveRanges.
+// The design supports two main scenarios (see GreedyAllocator):
+// (1) observing each conflicting range, without mutating the allocations, and
+// (2) observing each conflicting range, and then moving to the next, after
+// removing the current conflict.
+class LiveRangeConflictIterator {
+ public:
+ // Current conflict. nullptr if no conflicts, or if we reached the end of
+ // conflicts.
+ LiveRange* Current() const;
+
+ // Get the next conflict. Caller should handle non-consecutive repetitions of
+ // the same range.
+ LiveRange* GetNext() { return InternalGetNext(false); }
+
+ // Get the next conflict, after evicting the current one. Caller may expect
+ // to never observe the same live range more than once.
+ LiveRange* RemoveCurrentAndGetNext() { return InternalGetNext(true); }
+
+ private:
+ friend class CoalescedLiveRanges;
+
+ typedef IntervalStore::const_iterator interval_iterator;
+ LiveRangeConflictIterator(const LiveRange* range, IntervalStore* store);
+
+ // Move the store iterator to the first interval intersecting the query.
+ // Since the intervals are sorted, subsequent intervals intersecting the
+ // query follow. May leave the store iterator at "end", meaning that the
+ // current query has no intersection.
+ void MovePosToFirstConflictForQuery();
+
+ // Move both the query and the store iterator to the first intersection,
+ // if any. If there is none, the iterator is invalidated
+ // (IsFinished() == true).
+ void MovePosAndQueryToFirstConflict();
+
+ // Increment pos and skip over intervals belonging to the same range we
+ // started with (i.e. Current() before the call). It is possible that the
+ // range will be seen again, but not consecutively.
+ void IncrementPosAndSkipOverRepetitions();
+
+ // Common implementation used by both GetNext and
+ // RemoveCurrentAndGetNext.
+ LiveRange* InternalGetNext(bool clean_behind);
+
+ bool IsFinished() const { return query_ == nullptr; }
+
+ static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
+ return AllocatedInterval(pos, LifetimePosition::Invalid(), nullptr);
+ }
+
+ // Intersection utilities.
+ static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
+ LifetimePosition b_start, LifetimePosition b_end) {
+ return a_start < b_end && b_start < a_end;
+ }
+
+ bool QueryIntersectsAllocatedInterval() const {
+ DCHECK_NOT_NULL(query_);
+ return pos_ != intervals_->end() &&
+ Intersects(query_->start(), query_->end(), pos_->start_, pos_->end_);
+ }
+
+ void Invalidate() {
+ query_ = nullptr;
+ pos_ = intervals_->end();
+ }
+
+ const UseInterval* query_;
+ interval_iterator pos_;
+ IntervalStore* intervals_;
+};
+
+// Collection of live ranges allocated to the same register.
+// It supports efficiently finding all conflicts for a given, non-allocated
+// range. See AllocatedInterval.
+// Allocated live ranges do not intersect. At most, individual use intervals
+// touch. We store, for a live range, an AllocatedInterval corresponding to each
+// of that range's UseIntervals. We keep the list of AllocatedIntervals sorted
+// by starts. Then, given the non-intersecting property, we know that
+// consecutive AllocatedIntervals have the property that the "smaller"'s end is
+// less or equal to the "larger"'s start.
+// This allows for quick (logarithmic complexity) identification of the first
+// AllocatedInterval to conflict with a given LiveRange, and then for efficient
+// traversal of conflicts.
+class CoalescedLiveRanges : public ZoneObject {
+ public:
+ explicit CoalescedLiveRanges(Zone* zone) : intervals_(zone) {}
+ void clear() { intervals_.clear(); }
+
+ bool empty() const { return intervals_.empty(); }
+
+ // Iterate over each live range conflicting with the provided one.
+ // The same live range may be observed multiple times, but not consecutively.
+ LiveRangeConflictIterator GetConflicts(const LiveRange* range);
+
+
+ // Allocates a range with a pre-calculated candidate weight.
+ void AllocateRange(LiveRange* range);
+
+ // Unit testing API, verifying that allocated intervals do not overlap.
+ bool VerifyAllocationsAreValidForTesting() const;
+
+ private:
+ static const float kAllocatedRangeMultiplier;
+
+ IntervalStore& intervals() { return intervals_; }
+ const IntervalStore& intervals() const { return intervals_; }
+
+ // Augment the weight of a range that is about to be allocated.
+ static void UpdateWeightAtAllocation(LiveRange* range);
+
+ // Reduce the weight of a range that has lost allocation.
+ static void UpdateWeightAtEviction(LiveRange* range);
+
+
+ IntervalStore intervals_;
+ DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+#endif // V8_COALESCED_LIVE_RANGES_H_
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index 7942344..7295948 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -27,49 +27,63 @@
// -- Instruction operand accesses with conversions --------------------------
- Register InputRegister(int index) {
+ Register InputRegister(size_t index) {
return ToRegister(instr_->InputAt(index));
}
- DoubleRegister InputDoubleRegister(int index) {
+ DoubleRegister InputDoubleRegister(size_t index) {
return ToDoubleRegister(instr_->InputAt(index));
}
- double InputDouble(int index) { return ToDouble(instr_->InputAt(index)); }
+ double InputDouble(size_t index) { return ToDouble(instr_->InputAt(index)); }
- int32_t InputInt32(int index) {
+ float InputFloat32(size_t index) { return ToFloat32(instr_->InputAt(index)); }
+
+ int32_t InputInt32(size_t index) {
return ToConstant(instr_->InputAt(index)).ToInt32();
}
- int8_t InputInt8(int index) { return static_cast<int8_t>(InputInt32(index)); }
+ int64_t InputInt64(size_t index) {
+ return ToConstant(instr_->InputAt(index)).ToInt64();
+ }
- int16_t InputInt16(int index) {
+ int8_t InputInt8(size_t index) {
+ return static_cast<int8_t>(InputInt32(index));
+ }
+
+ int16_t InputInt16(size_t index) {
return static_cast<int16_t>(InputInt32(index));
}
- uint8_t InputInt5(int index) {
+ uint8_t InputInt5(size_t index) {
return static_cast<uint8_t>(InputInt32(index) & 0x1F);
}
- uint8_t InputInt6(int index) {
+ uint8_t InputInt6(size_t index) {
return static_cast<uint8_t>(InputInt32(index) & 0x3F);
}
- Handle<HeapObject> InputHeapObject(int index) {
+ ExternalReference InputExternalReference(size_t index) {
+ return ToExternalReference(instr_->InputAt(index));
+ }
+
+ Handle<HeapObject> InputHeapObject(size_t index) {
return ToHeapObject(instr_->InputAt(index));
}
- Label* InputLabel(int index) { return ToLabel(instr_->InputAt(index)); }
+ Label* InputLabel(size_t index) { return ToLabel(instr_->InputAt(index)); }
- BasicBlock::RpoNumber InputRpo(int index) {
+ RpoNumber InputRpo(size_t index) {
return ToRpoNumber(instr_->InputAt(index));
}
- Register OutputRegister(int index = 0) {
+ Register OutputRegister(size_t index = 0) {
return ToRegister(instr_->OutputAt(index));
}
- Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); }
+ Register TempRegister(size_t index) {
+ return ToRegister(instr_->TempAt(index));
+ }
DoubleRegister OutputDoubleRegister() {
return ToDoubleRegister(instr_->Output());
@@ -81,34 +95,42 @@
return gen_->GetLabel(ToRpoNumber(op));
}
- BasicBlock::RpoNumber ToRpoNumber(InstructionOperand* op) {
+ RpoNumber ToRpoNumber(InstructionOperand* op) {
return ToConstant(op).ToRpoNumber();
}
Register ToRegister(InstructionOperand* op) {
- DCHECK(op->IsRegister());
- return Register::FromAllocationIndex(op->index());
+ return LocationOperand::cast(op)->GetRegister();
}
DoubleRegister ToDoubleRegister(InstructionOperand* op) {
- DCHECK(op->IsDoubleRegister());
- return DoubleRegister::FromAllocationIndex(op->index());
+ return LocationOperand::cast(op)->GetDoubleRegister();
}
Constant ToConstant(InstructionOperand* op) {
if (op->IsImmediate()) {
- return gen_->code()->GetImmediate(op->index());
+ return gen_->code()->GetImmediate(ImmediateOperand::cast(op));
}
- return gen_->code()->GetConstant(op->index());
+ return gen_->code()->GetConstant(
+ ConstantOperand::cast(op)->virtual_register());
}
double ToDouble(InstructionOperand* op) { return ToConstant(op).ToFloat64(); }
+ float ToFloat32(InstructionOperand* op) { return ToConstant(op).ToFloat32(); }
+
+ ExternalReference ToExternalReference(InstructionOperand* op) {
+ return ToConstant(op).ToExternalReference();
+ }
+
Handle<HeapObject> ToHeapObject(InstructionOperand* op) {
return ToConstant(op).ToHeapObject();
}
Frame* frame() const { return gen_->frame(); }
+ FrameAccessState* frame_access_state() const {
+ return gen_->frame_access_state();
+ }
Isolate* isolate() const { return gen_->isolate(); }
Linkage* linkage() const { return gen_->linkage(); }
@@ -128,12 +150,15 @@
Label* entry() { return &entry_; }
Label* exit() { return &exit_; }
+ Frame* frame() const { return frame_; }
+ Isolate* isolate() const { return masm()->isolate(); }
MacroAssembler* masm() const { return masm_; }
OutOfLineCode* next() const { return next_; }
private:
Label entry_;
Label exit_;
+ Frame* const frame_;
MacroAssembler* const masm_;
OutOfLineCode* const next_;
};
@@ -144,6 +169,8 @@
static inline void FinishCode(MacroAssembler* masm) {
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
masm->CheckConstPool(true, false);
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+ masm->ud2();
#endif
}
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index cfe4f06..313567e 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -4,40 +4,72 @@
#include "src/compiler/code-generator.h"
+#include "src/address-map.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
+#include "src/frames-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
+class CodeGenerator::JumpTable final : public ZoneObject {
+ public:
+ JumpTable(JumpTable* next, Label** targets, size_t target_count)
+ : next_(next), targets_(targets), target_count_(target_count) {}
+
+ Label* label() { return &label_; }
+ JumpTable* next() const { return next_; }
+ Label** targets() const { return targets_; }
+ size_t target_count() const { return target_count_; }
+
+ private:
+ Label label_;
+ JumpTable* const next_;
+ Label** const targets_;
+ size_t const target_count_;
+};
+
+
CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info)
- : frame_(frame),
+ : frame_access_state_(new (code->zone()) FrameAccessState(frame)),
linkage_(linkage),
code_(code),
info_(info),
labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
- current_block_(BasicBlock::RpoNumber::Invalid()),
- current_source_position_(SourcePosition::Invalid()),
- masm_(code->zone()->isolate(), NULL, 0),
+ current_block_(RpoNumber::Invalid()),
+ current_source_position_(SourcePosition::Unknown()),
+ masm_(info->isolate(), nullptr, 0, CodeObjectRequired::kYes),
resolver_(this),
safepoints_(code->zone()),
+ handlers_(code->zone()),
deoptimization_states_(code->zone()),
deoptimization_literals_(code->zone()),
+ inlined_function_count_(0),
translations_(code->zone()),
last_lazy_deopt_pc_(0),
- ools_(nullptr) {
+ jump_tables_(nullptr),
+ ools_(nullptr),
+ osr_pc_offset_(-1) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
+ if (code->ContainsCall()) {
+ frame->MarkNeedsFrame();
+ }
}
Handle<Code> CodeGenerator::GenerateCode() {
CompilationInfo* info = this->info();
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in AssemblePrologue).
+ FrameScope frame_scope(masm(), StackFrame::MANUAL);
+
// Emit a code line info recording start event.
PositionsRecorder* recorder = masm()->positions_recorder();
LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));
@@ -51,6 +83,23 @@
info->set_prologue_offset(masm()->pc_offset());
AssemblePrologue();
+ // Define deoptimization literals for all inlined functions.
+ DCHECK_EQ(0u, deoptimization_literals_.size());
+ for (auto& inlined : info->inlined_functions()) {
+ if (!inlined.shared_info.is_identical_to(info->shared_info())) {
+ DefineDeoptimizationLiteral(inlined.shared_info);
+ }
+ }
+ inlined_function_count_ = deoptimization_literals_.size();
+
+ // Define deoptimization literals for all unoptimized code objects of inlined
+ // functions. This ensures unoptimized code is kept alive by optimized code.
+ for (auto& inlined : info->inlined_functions()) {
+ if (!inlined.shared_info.is_identical_to(info->shared_info())) {
+ DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
+ }
+ }
+
// Assemble all non-deferred blocks, followed by deferred ones.
for (int deferred = 0; deferred < 2; ++deferred) {
for (auto const block : code()->instruction_blocks()) {
@@ -59,13 +108,36 @@
}
// Align loop headers on 16-byte boundaries.
if (block->IsLoopHeader()) masm()->Align(16);
+ // Ensure lazy deopt doesn't patch handler entry points.
+ if (block->IsHandler()) EnsureSpaceForLazyDeopt();
// Bind a label for a block.
current_block_ = block->rpo_number();
if (FLAG_code_comments) {
// TODO(titzer): these code comments are a giant memory leak.
- Vector<char> buffer = Vector<char>::New(32);
- SNPrintF(buffer, "-- B%d start --", block->id().ToInt());
- masm()->RecordComment(buffer.start());
+ Vector<char> buffer = Vector<char>::New(200);
+ char* buffer_start = buffer.start();
+
+ int next = SNPrintF(
+ buffer, "-- B%d start%s%s%s%s", block->rpo_number().ToInt(),
+ block->IsDeferred() ? " (deferred)" : "",
+ block->needs_frame() ? "" : " (no frame)",
+ block->must_construct_frame() ? " (construct frame)" : "",
+ block->must_deconstruct_frame() ? " (deconstruct frame)" : "");
+
+ buffer = buffer.SubVector(next, buffer.length());
+
+ if (block->IsLoopHeader()) {
+ next =
+ SNPrintF(buffer, " (loop up to %d)", block->loop_end().ToInt());
+ buffer = buffer.SubVector(next, buffer.length());
+ }
+ if (block->loop_header().IsValid()) {
+ next =
+ SNPrintF(buffer, " (in loop %d)", block->loop_header().ToInt());
+ buffer = buffer.SubVector(next, buffer.length());
+ }
+ SNPrintF(buffer, " --");
+ masm()->RecordComment(buffer_start);
}
masm()->bind(GetLabel(current_block_));
for (int i = block->code_start(); i < block->code_end(); ++i) {
@@ -80,37 +152,58 @@
for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
masm()->bind(ool->entry());
ool->Generate();
- masm()->jmp(ool->exit());
+ if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
}
}
- FinishCode(masm());
-
// Ensure there is space for lazy deoptimization in the code.
- if (!info->IsStub()) {
+ if (info->ShouldEnsureSpaceForLazyDeopt()) {
int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
while (masm()->pc_offset() < target_offset) {
masm()->nop();
}
}
+ FinishCode(masm());
+
+ // Emit the jump tables.
+ if (jump_tables_) {
+ masm()->Align(kPointerSize);
+ for (JumpTable* table = jump_tables_; table; table = table->next()) {
+ masm()->bind(table->label());
+ AssembleJumpTable(table->targets(), table->target_count());
+ }
+ }
+
safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
- // TODO(titzer): what are the right code flags here?
- Code::Kind kind = Code::STUB;
- if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- kind = Code::OPTIMIZED_FUNCTION;
- }
- Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
- masm(), Code::ComputeFlags(kind), info);
+ Handle<Code> result =
+ v8::internal::CodeGenerator::MakeCodeEpilogue(masm(), info);
result->set_is_turbofanned(true);
result->set_stack_slots(frame()->GetSpillSlotCount());
result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
+ // Emit exception handler table.
+ if (!handlers_.empty()) {
+ Handle<HandlerTable> table =
+ Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
+ HandlerTable::LengthForReturn(static_cast<int>(handlers_.size())),
+ TENURED));
+ for (size_t i = 0; i < handlers_.size(); ++i) {
+ int position = handlers_[i].handler->pos();
+ HandlerTable::CatchPrediction prediction = handlers_[i].caught_locally
+ ? HandlerTable::CAUGHT
+ : HandlerTable::UNCAUGHT;
+ table->SetReturnOffset(static_cast<int>(i), handlers_[i].pc_offset);
+ table->SetReturnHandler(static_cast<int>(i), position, prediction);
+ }
+ result->set_handler_table(*table);
+ }
+
PopulateDeoptimizationData(result);
// Ensure there is space for lazy deoptimization in the relocation info.
- if (!info->IsStub()) {
+ if (info->ShouldEnsureSpaceForLazyDeopt()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(result);
}
@@ -122,113 +215,143 @@
}
-bool CodeGenerator::IsNextInAssemblyOrder(BasicBlock::RpoNumber block) const {
- return code()->InstructionBlockAt(current_block_)->ao_number().IsNext(
- code()->InstructionBlockAt(block)->ao_number());
+bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
+ return code()
+ ->InstructionBlockAt(current_block_)
+ ->ao_number()
+ .IsNext(code()->InstructionBlockAt(block)->ao_number());
}
-void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
- int arguments,
+void CodeGenerator::RecordSafepoint(ReferenceMap* references,
+ Safepoint::Kind kind, int arguments,
Safepoint::DeoptMode deopt_mode) {
- const ZoneList<InstructionOperand*>* operands =
- pointers->GetNormalizedOperands();
Safepoint safepoint =
safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- InstructionOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- Register reg = Register::FromAllocationIndex(pointer->index());
+ int stackSlotToSpillSlotDelta =
+ frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
+ for (auto& operand : references->reference_operands()) {
+ if (operand.IsStackSlot()) {
+ int index = LocationOperand::cast(operand).index();
+ DCHECK(index >= 0);
+ // Safepoint table indices are 0-based from the beginning of the spill
+ // slot area, adjust appropriately.
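+ // E.g. with 10 total frame slots and 4 spill slots, stack slot 8 maps to
+ // safepoint index 8 - (10 - 4) = 2.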
+ index -= stackSlotToSpillSlotDelta;
+ safepoint.DefinePointerSlot(index, zone());
+ } else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ Register reg = LocationOperand::cast(operand).GetRegister();
safepoint.DefinePointerRegister(reg, zone());
}
}
}
+bool CodeGenerator::IsMaterializableFromFrame(Handle<HeapObject> object,
+ int* offset_return) {
+ if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+ if (info()->has_context() && object.is_identical_to(info()->context()) &&
+ !info()->is_osr()) {
+ *offset_return = StandardFrameConstants::kContextOffset;
+ return true;
+ } else if (object.is_identical_to(info()->closure())) {
+ *offset_return = JavaScriptFrameConstants::kFunctionOffset;
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool CodeGenerator::IsMaterializableFromRoot(
+ Handle<HeapObject> object, Heap::RootListIndex* index_return) {
+ const CallDescriptor* incoming_descriptor =
+ linkage()->GetIncomingDescriptor();
+ if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
+ RootIndexMap map(isolate());
+ int root_index = map.Lookup(*object);
+ if (root_index != RootIndexMap::kInvalidRootIndex) {
+ *index_return = static_cast<Heap::RootListIndex>(root_index);
+ return true;
+ }
+ }
+ return false;
+}
+
+
void CodeGenerator::AssembleInstruction(Instruction* instr) {
- if (instr->IsGapMoves()) {
- // Handle parallel moves associated with the gap instruction.
- AssembleGap(GapInstruction::cast(instr));
- } else if (instr->IsSourcePosition()) {
- AssembleSourcePosition(SourcePositionInstruction::cast(instr));
- } else {
- // Assemble architecture-specific code for the instruction.
- AssembleArchInstruction(instr);
+ AssembleGaps(instr);
+ AssembleSourcePosition(instr);
+ // Assemble architecture-specific code for the instruction.
+ AssembleArchInstruction(instr);
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
- FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
- if (mode == kFlags_branch) {
- // Assemble a branch after this instruction.
- InstructionOperandConverter i(this, instr);
- BasicBlock::RpoNumber true_rpo =
- i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
- BasicBlock::RpoNumber false_rpo =
- i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
+ if (mode == kFlags_branch) {
+ // Assemble a branch after this instruction.
+ InstructionOperandConverter i(this, instr);
+ RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
+ RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
- if (true_rpo == false_rpo) {
- // redundant branch.
- if (!IsNextInAssemblyOrder(true_rpo)) {
- AssembleArchJump(true_rpo);
- }
- return;
+ if (true_rpo == false_rpo) {
+ // redundant branch.
+ if (!IsNextInAssemblyOrder(true_rpo)) {
+ AssembleArchJump(true_rpo);
}
- if (IsNextInAssemblyOrder(true_rpo)) {
- // true block is next, can fall through if condition negated.
- std::swap(true_rpo, false_rpo);
- condition = NegateFlagsCondition(condition);
- }
- BranchInfo branch;
- branch.condition = condition;
- branch.true_label = GetLabel(true_rpo);
- branch.false_label = GetLabel(false_rpo);
- branch.fallthru = IsNextInAssemblyOrder(false_rpo);
- // Assemble architecture-specific branch.
- AssembleArchBranch(instr, &branch);
- } else if (mode == kFlags_set) {
- // Assemble a boolean materialization after this instruction.
- AssembleArchBoolean(instr, condition);
+ return;
}
+ if (IsNextInAssemblyOrder(true_rpo)) {
+ // true block is next, can fall through if condition negated.
+ std::swap(true_rpo, false_rpo);
+ condition = NegateFlagsCondition(condition);
+ }
+ BranchInfo branch;
+ branch.condition = condition;
+ branch.true_label = GetLabel(true_rpo);
+ branch.false_label = GetLabel(false_rpo);
+ branch.fallthru = IsNextInAssemblyOrder(false_rpo);
+ // Assemble architecture-specific branch.
+ AssembleArchBranch(instr, &branch);
+ } else if (mode == kFlags_set) {
+ // Assemble a boolean materialization after this instruction.
+ AssembleArchBoolean(instr, condition);
}
}
-void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) {
- SourcePosition source_position = instr->source_position();
+void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
+ SourcePosition source_position;
+ if (!code()->GetSourcePosition(instr, &source_position)) return;
if (source_position == current_source_position_) return;
- DCHECK(!source_position.IsInvalid());
- if (!source_position.IsUnknown()) {
- int code_pos = source_position.raw();
- masm()->positions_recorder()->RecordPosition(source_position.raw());
- masm()->positions_recorder()->WriteRecordedPositions();
- if (FLAG_code_comments) {
- Vector<char> buffer = Vector<char>::New(256);
- CompilationInfo* info = this->info();
- int ln = Script::GetLineNumber(info->script(), code_pos);
- int cn = Script::GetColumnNumber(info->script(), code_pos);
- if (info->script()->name()->IsString()) {
- Handle<String> file(String::cast(info->script()->name()));
- base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --",
- file->ToCString().get(), ln, cn);
- } else {
- base::OS::SNPrintF(buffer.start(), buffer.length(),
- "-- <unknown>:%d:%d --", ln, cn);
- }
- masm()->RecordComment(buffer.start());
- }
- }
current_source_position_ = source_position;
+ if (source_position.IsUnknown()) return;
+ int code_pos = source_position.raw();
+ masm()->positions_recorder()->RecordPosition(code_pos);
+ masm()->positions_recorder()->WriteRecordedPositions();
+ if (FLAG_code_comments) {
+ Vector<char> buffer = Vector<char>::New(256);
+ CompilationInfo* info = this->info();
+ int ln = Script::GetLineNumber(info->script(), code_pos);
+ int cn = Script::GetColumnNumber(info->script(), code_pos);
+ if (info->script()->name()->IsString()) {
+ Handle<String> file(String::cast(info->script()->name()));
+ base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --",
+ file->ToCString().get(), ln, cn);
+ } else {
+ base::OS::SNPrintF(buffer.start(), buffer.length(),
+ "-- <unknown>:%d:%d --", ln, cn);
+ }
+ masm()->RecordComment(buffer.start());
+ }
}
-void CodeGenerator::AssembleGap(GapInstruction* instr) {
- for (int i = GapInstruction::FIRST_INNER_POSITION;
- i <= GapInstruction::LAST_INNER_POSITION; i++) {
- GapInstruction::InnerPosition inner_pos =
- static_cast<GapInstruction::InnerPosition>(i);
+void CodeGenerator::AssembleGaps(Instruction* instr) {
+ for (int i = Instruction::FIRST_GAP_POSITION;
+ i <= Instruction::LAST_GAP_POSITION; i++) {
+ Instruction::GapPosition inner_pos =
+ static_cast<Instruction::GapPosition>(i);
ParallelMove* move = instr->GetParallelMove(inner_pos);
- if (move != NULL) resolver()->Resolve(move);
+ if (move != nullptr) resolver()->Resolve(move);
}
}
@@ -236,7 +359,7 @@
void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
CompilationInfo* info = this->info();
int deopt_count = static_cast<int>(deoptimization_states_.size());
- if (deopt_count == 0) return;
+ if (deopt_count == 0 && !info->is_osr()) return;
Handle<DeoptimizationInputData> data =
DeoptimizationInputData::New(isolate(), deopt_count, TENURED);
@@ -244,13 +367,11 @@
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translation_array);
- data->SetInlinedFunctionCount(Smi::FromInt(0));
+ data->SetInlinedFunctionCount(
+ Smi::FromInt(static_cast<int>(inlined_function_count_)));
data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
- // TODO(jarin) The following code was copied over from Lithium, not sure
- // whether the scope or the IsOptimizing condition are really needed.
- if (info->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
+
+ if (info->has_shared_info()) {
data->SetSharedFunctionInfo(*info->shared_info());
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
@@ -266,16 +387,21 @@
data->SetLiteralArray(*literals);
}
- // No OSR in Turbofan yet...
- BailoutId osr_ast_id = BailoutId::None();
- data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(-1));
+ if (info->is_osr()) {
+ DCHECK(osr_pc_offset_ >= 0);
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+ } else {
+ BailoutId osr_ast_id = BailoutId::None();
+ data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(-1));
+ }
// Populate deoptimization entries.
for (int i = 0; i < deopt_count; i++) {
DeoptimizationState* deoptimization_state = deoptimization_states_[i];
data->SetAstId(i, deoptimization_state->bailout_id());
- CHECK_NE(NULL, deoptimization_states_[i]);
+ CHECK(deoptimization_states_[i]);
data->SetTranslationIndex(
i, Smi::FromInt(deoptimization_states_[i]->translation_id()));
data->SetArgumentsStackHeight(i, Smi::FromInt(0));
@@ -286,25 +412,36 @@
}
-void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) {
+Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
+ jump_tables_ = new (zone()) JumpTable(jump_tables_, targets, target_count);
+ return jump_tables_->label();
+}
+
+
+void CodeGenerator::RecordCallPosition(Instruction* instr) {
CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));
bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);
RecordSafepoint(
- instr->pointer_map(), Safepoint::kSimple, 0,
+ instr->reference_map(), Safepoint::kSimple, 0,
needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);
+ if (flags & CallDescriptor::kHasExceptionHandler) {
+ InstructionOperandConverter i(this, instr);
+ bool caught = flags & CallDescriptor::kHasLocalCatchHandler;
+ RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
+ handlers_.push_back({caught, GetLabel(handler_rpo), masm()->pc_offset()});
+ }
+
if (flags & CallDescriptor::kNeedsNopAfterCall) {
AddNopForSmiCodeInlining();
}
if (needs_frame_state) {
MarkLazyDeoptSite();
- // If the frame state is present, it starts at argument 1
- // (just after the code address).
- InstructionOperandConverter converter(this, instr);
- // Deoptimization info starts at argument 1
+ // If the frame state is present, it starts at argument 1 (just after the
+ // code address).
size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetFrameStateDescriptor(instr, frame_state_offset);
@@ -348,86 +485,120 @@
FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
Instruction* instr, size_t frame_state_offset) {
InstructionOperandConverter i(this, instr);
- InstructionSequence::StateId state_id = InstructionSequence::StateId::FromInt(
- i.InputInt32(static_cast<int>(frame_state_offset)));
+ InstructionSequence::StateId state_id =
+ InstructionSequence::StateId::FromInt(i.InputInt32(frame_state_offset));
return code()->GetFrameStateDescriptor(state_id);
}
-struct OperandAndType {
- OperandAndType(InstructionOperand* operand, MachineType type)
- : operand_(operand), type_(type) {}
- InstructionOperand* operand_;
- MachineType type_;
-};
-
-static OperandAndType TypedOperandForFrameState(
- FrameStateDescriptor* descriptor, Instruction* instr,
- size_t frame_state_offset, size_t index, OutputFrameStateCombine combine) {
- DCHECK(index < descriptor->GetSize(combine));
- switch (combine.kind()) {
- case OutputFrameStateCombine::kPushOutput: {
- DCHECK(combine.GetPushCount() <= instr->OutputCount());
- size_t size_without_output =
- descriptor->GetSize(OutputFrameStateCombine::Ignore());
- // If the index is past the existing stack items, return the output.
- if (index >= size_without_output) {
- return OperandAndType(instr->OutputAt(index - size_without_output),
- kMachAnyTagged);
- }
- break;
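+// Emits translation commands for a single state value: nested descriptors
+// begin a captured object and recurse over its fields, duplicates refer back
+// to an already materialized object, and plain values translate the next
+// input operand of the instruction.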
+void CodeGenerator::TranslateStateValueDescriptor(
+ StateValueDescriptor* desc, Translation* translation,
+ InstructionOperandIterator* iter) {
+ if (desc->IsNested()) {
+ translation->BeginCapturedObject(static_cast<int>(desc->size()));
+ for (size_t index = 0; index < desc->fields().size(); index++) {
+ TranslateStateValueDescriptor(&desc->fields()[index], translation, iter);
}
- case OutputFrameStateCombine::kPokeAt:
- size_t index_from_top =
- descriptor->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
- if (index >= index_from_top &&
- index < index_from_top + instr->OutputCount()) {
- return OperandAndType(instr->OutputAt(index - index_from_top),
- kMachAnyTagged);
- }
- break;
+ } else if (desc->IsDuplicate()) {
+ translation->DuplicateObject(static_cast<int>(desc->id()));
+ } else {
+ DCHECK(desc->IsPlain());
+ AddTranslationForOperand(translation, iter->instruction(), iter->Advance(),
+ desc->type());
}
- return OperandAndType(instr->InputAt(frame_state_offset + index),
- descriptor->GetType(index));
+}
+
+
+void CodeGenerator::TranslateFrameStateDescriptorOperands(
+ FrameStateDescriptor* desc, InstructionOperandIterator* iter,
+ OutputFrameStateCombine combine, Translation* translation) {
+ for (size_t index = 0; index < desc->GetSize(combine); index++) {
+ switch (combine.kind()) {
+ case OutputFrameStateCombine::kPushOutput: {
+ DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
+ size_t size_without_output =
+ desc->GetSize(OutputFrameStateCombine::Ignore());
+ // Check if the index is past the existing stack items in values_.
+ if (index >= size_without_output) {
+ // Materialize the result of the call instruction in this slot.
+ AddTranslationForOperand(
+ translation, iter->instruction(),
+ iter->instruction()->OutputAt(index - size_without_output),
+ MachineType::AnyTagged());
+ continue;
+ }
+ break;
+ }
+ case OutputFrameStateCombine::kPokeAt:
+ // The result of the call should be placed at position
+ // [index_from_top] in the stack (overwriting whatever was
+ // previously there).
+ size_t index_from_top =
+ desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
+ if (index >= index_from_top &&
+ index < index_from_top + iter->instruction()->OutputCount()) {
+ AddTranslationForOperand(
+ translation, iter->instruction(),
+ iter->instruction()->OutputAt(index - index_from_top),
+ MachineType::AnyTagged());
+ iter->Advance(); // We do not use this input, but we need to
+ // advance, as the input got replaced.
+ continue;
+ }
+ break;
+ }
+ StateValueDescriptor* value_desc = desc->GetStateValueDescriptor();
+ TranslateStateValueDescriptor(&value_desc->fields()[index], translation,
+ iter);
+ }
}
void CodeGenerator::BuildTranslationForFrameStateDescriptor(
- FrameStateDescriptor* descriptor, Instruction* instr,
- Translation* translation, size_t frame_state_offset,
- OutputFrameStateCombine state_combine) {
+ FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
+ Translation* translation, OutputFrameStateCombine state_combine) {
// Outer-most state must be added to translation first.
- if (descriptor->outer_state() != NULL) {
- BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), instr,
- translation, frame_state_offset,
+ if (descriptor->outer_state() != nullptr) {
+ BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), iter,
+ translation,
OutputFrameStateCombine::Ignore());
}
- int id = Translation::kSelfLiteralId;
- if (!descriptor->jsfunction().is_null()) {
- id = DefineDeoptimizationLiteral(
- Handle<Object>::cast(descriptor->jsfunction().ToHandleChecked()));
+ Handle<SharedFunctionInfo> shared_info;
+ if (!descriptor->shared_info().ToHandle(&shared_info)) {
+ if (!info()->has_shared_info()) {
+ return; // Stub with no SharedFunctionInfo.
+ }
+ shared_info = info()->shared_info();
}
+ int shared_info_id = DefineDeoptimizationLiteral(shared_info);
switch (descriptor->type()) {
- case JS_FRAME:
+ case FrameStateType::kJavaScriptFunction:
translation->BeginJSFrame(
- descriptor->bailout_id(), id,
+ descriptor->bailout_id(), shared_info_id,
static_cast<unsigned int>(descriptor->GetSize(state_combine) -
- descriptor->parameters_count()));
+ (1 + descriptor->parameters_count())));
break;
- case ARGUMENTS_ADAPTOR:
+ case FrameStateType::kInterpretedFunction:
+ translation->BeginInterpretedFrame(
+ descriptor->bailout_id(), shared_info_id,
+ static_cast<unsigned int>(descriptor->locals_count()));
+ break;
+ case FrameStateType::kArgumentsAdaptor:
translation->BeginArgumentsAdaptorFrame(
- id, static_cast<unsigned int>(descriptor->parameters_count()));
+ shared_info_id,
+ static_cast<unsigned int>(descriptor->parameters_count()));
+ break;
+ case FrameStateType::kConstructStub:
+ translation->BeginConstructStubFrame(
+ shared_info_id,
+ static_cast<unsigned int>(descriptor->parameters_count()));
break;
}
- frame_state_offset += descriptor->outer_state()->GetTotalSize();
- for (size_t i = 0; i < descriptor->GetSize(state_combine); i++) {
- OperandAndType op = TypedOperandForFrameState(
- descriptor, instr, frame_state_offset, i, state_combine);
- AddTranslationForOperand(translation, instr, op.operand_, op.type_);
- }
+ TranslateFrameStateDescriptorOperands(descriptor, iter, state_combine,
+ translation);
}
@@ -441,8 +612,9 @@
Translation translation(
&translations_, static_cast<int>(descriptor->GetFrameCount()),
static_cast<int>(descriptor->GetJSFrameCount()), zone());
- BuildTranslationForFrameStateDescriptor(descriptor, instr, &translation,
- frame_state_offset, state_combine);
+ InstructionOperandIterator iter(instr, frame_state_offset);
+ BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
+ state_combine);
int deoptimization_id = static_cast<int>(deoptimization_states_.size());
@@ -458,35 +630,39 @@
InstructionOperand* op,
MachineType type) {
if (op->IsStackSlot()) {
- if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
- type == kMachInt16) {
- translation->StoreInt32StackSlot(op->index());
- } else if (type == kMachUint32 || type == kMachUint16 ||
- type == kMachUint8) {
- translation->StoreUint32StackSlot(op->index());
- } else if ((type & kRepMask) == kRepTagged) {
- translation->StoreStackSlot(op->index());
+ if (type.representation() == MachineRepresentation::kBit) {
+ translation->StoreBoolStackSlot(LocationOperand::cast(op)->index());
+ } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
+ type == MachineType::Int32()) {
+ translation->StoreInt32StackSlot(LocationOperand::cast(op)->index());
+ } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
+ type == MachineType::Uint32()) {
+ translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
+ } else if (type.representation() == MachineRepresentation::kTagged) {
+ translation->StoreStackSlot(LocationOperand::cast(op)->index());
} else {
CHECK(false);
}
} else if (op->IsDoubleStackSlot()) {
- DCHECK((type & (kRepFloat32 | kRepFloat64)) != 0);
- translation->StoreDoubleStackSlot(op->index());
+ DCHECK(IsFloatingPoint(type.representation()));
+ translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
} else if (op->IsRegister()) {
InstructionOperandConverter converter(this, instr);
- if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
- type == kMachInt16) {
+ if (type.representation() == MachineRepresentation::kBit) {
+ translation->StoreBoolRegister(converter.ToRegister(op));
+ } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
+ type == MachineType::Int32()) {
translation->StoreInt32Register(converter.ToRegister(op));
- } else if (type == kMachUint32 || type == kMachUint16 ||
- type == kMachUint8) {
+ } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
+ type == MachineType::Uint32()) {
translation->StoreUint32Register(converter.ToRegister(op));
- } else if ((type & kRepMask) == kRepTagged) {
+ } else if (type.representation() == MachineRepresentation::kTagged) {
translation->StoreRegister(converter.ToRegister(op));
} else {
CHECK(false);
}
} else if (op->IsDoubleRegister()) {
- DCHECK((type & (kRepFloat32 | kRepFloat64)) != 0);
+ DCHECK(IsFloatingPoint(type.representation()));
InstructionOperandConverter converter(this, instr);
translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
} else if (op->IsImmediate()) {
@@ -495,23 +671,34 @@
Handle<Object> constant_object;
switch (constant.type()) {
case Constant::kInt32:
- DCHECK(type == kMachInt32 || type == kMachUint32);
+ DCHECK(type == MachineType::Int32() || type == MachineType::Uint32() ||
+ type.representation() == MachineRepresentation::kBit);
constant_object =
isolate()->factory()->NewNumberFromInt(constant.ToInt32());
break;
+ case Constant::kFloat32:
+ DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
+ type.representation() == MachineRepresentation::kTagged);
+ constant_object = isolate()->factory()->NewNumber(constant.ToFloat32());
+ break;
case Constant::kFloat64:
- DCHECK(type == kMachFloat64 || type == kMachAnyTagged);
+ DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
+ type.representation() == MachineRepresentation::kTagged);
constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
break;
case Constant::kHeapObject:
- DCHECK((type & kRepMask) == kRepTagged);
+ DCHECK(type.representation() == MachineRepresentation::kTagged);
constant_object = constant.ToHeapObject();
break;
default:
CHECK(false);
}
- int literal_id = DefineDeoptimizationLiteral(constant_object);
- translation->StoreLiteral(literal_id);
+ if (constant_object.is_identical_to(info()->closure())) {
+ translation->StoreJSFrameFunction();
+ } else {
+ int literal_id = DefineDeoptimizationLiteral(constant_object);
+ translation->StoreLiteral(literal_id);
+ }
} else {
CHECK(false);
}
@@ -522,60 +709,23 @@
last_lazy_deopt_pc_ = masm()->pc_offset();
}
-#if !V8_TURBOFAN_BACKEND
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
- UNIMPLEMENTED();
+int CodeGenerator::TailCallFrameStackSlotDelta(int stack_param_delta) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int spill_slots = frame()->GetSpillSlotCount();
+ bool has_frame = descriptor->IsJSFunctionCall() || spill_slots > 0;
+ // Leave the PC on the stack on platforms that have that as part of their
+ // ABI.
+ int pc_slots = V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
+ int sp_slot_delta =
+ has_frame ? (frame()->GetTotalFrameSlotCount() - pc_slots) : 0;
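+ // E.g. on a target that stores the return address on the stack, a 6-slot
+ // frame with a zero parameter delta yields 6 - 1 = 5 discarded slots.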
+ // Discard only slots that won't be used by new parameters.
+ sp_slot_delta += stack_param_delta;
+ return sp_slot_delta;
}
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
- BranchInfo* branch) {
- UNIMPLEMENTED();
-}
-
-
-void CodeGenerator::AssembleArchBoolean(Instruction* instr,
- FlagsCondition condition) {
- UNIMPLEMENTED();
-}
-
-
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
- UNIMPLEMENTED();
-}
-
-
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
- UNIMPLEMENTED();
-}
-
-
-void CodeGenerator::AssemblePrologue() { UNIMPLEMENTED(); }
-
-
-void CodeGenerator::AssembleReturn() { UNIMPLEMENTED(); }
-
-
-void CodeGenerator::AssembleMove(InstructionOperand* source,
- InstructionOperand* destination) {
- UNIMPLEMENTED();
-}
-
-
-void CodeGenerator::AssembleSwap(InstructionOperand* source,
- InstructionOperand* destination) {
- UNIMPLEMENTED();
-}
-
-
-void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }
-
-#endif // !V8_TURBOFAN_BACKEND
-
-
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
- : masm_(gen->masm()), next_(gen->ools_) {
+ : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
gen->ools_ = this;
}
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index 747bad2..70bf81f 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -16,6 +16,7 @@
namespace compiler {
// Forward declarations.
+class FrameAccessState;
class Linkage;
class OutOfLineCode;
@@ -27,8 +28,22 @@
};
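+// Walks the input operands of an instruction from a given start position;
+// used to consume frame state inputs while building deoptimization
+// translations.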
+class InstructionOperandIterator {
+ public:
+ InstructionOperandIterator(Instruction* instr, size_t pos)
+ : instr_(instr), pos_(pos) {}
+
+ Instruction* instruction() const { return instr_; }
+ InstructionOperand* Advance() { return instr_->InputAt(pos_++); }
+
+ private:
+ Instruction* instr_;
+ size_t pos_;
+};
+
+
// Generates native code for a sequence of instructions.
-class CodeGenerator FINAL : public GapResolver::Assembler {
+class CodeGenerator final : public GapResolver::Assembler {
public:
explicit CodeGenerator(Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info);
@@ -37,11 +52,12 @@
Handle<Code> GenerateCode();
InstructionSequence* code() const { return code_; }
- Frame* frame() const { return frame_; }
- Isolate* isolate() const { return zone()->isolate(); }
+ FrameAccessState* frame_access_state() const { return frame_access_state_; }
+ Frame* frame() const { return frame_access_state_->frame(); }
+ Isolate* isolate() const { return info_->isolate(); }
Linkage* linkage() const { return linkage_; }
- Label* GetLabel(BasicBlock::RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
+ Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
private:
MacroAssembler* masm() { return &masm_; }
@@ -52,27 +68,39 @@
// Checks if {block} will appear directly after {current_block_} when
// assembling code, in which case, a fall-through can be used.
- bool IsNextInAssemblyOrder(BasicBlock::RpoNumber block) const;
+ bool IsNextInAssemblyOrder(RpoNumber block) const;
// Record a safepoint with the given pointer map.
- void RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
+ void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
int arguments, Safepoint::DeoptMode deopt_mode);
+ // Check if a heap object can be materialized by loading from the frame, which
+ // is usually way cheaper than materializing the actual heap object constant.
+ bool IsMaterializableFromFrame(Handle<HeapObject> object, int* offset_return);
+ // Check if a heap object can be materialized by loading from a heap root,
+ // which is cheaper on some platforms than materializing the actual heap
+ // object constant.
+ bool IsMaterializableFromRoot(Handle<HeapObject> object,
+ Heap::RootListIndex* index_return);
+
// Assemble code for the specified instruction.
void AssembleInstruction(Instruction* instr);
- void AssembleSourcePosition(SourcePositionInstruction* instr);
- void AssembleGap(GapInstruction* gap);
+ void AssembleSourcePosition(Instruction* instr);
+ void AssembleGaps(Instruction* instr);
// ===========================================================================
// ============= Architecture-specific code generation methods. ==============
// ===========================================================================
void AssembleArchInstruction(Instruction* instr);
- void AssembleArchJump(BasicBlock::RpoNumber target);
+ void AssembleArchJump(RpoNumber target);
void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+ void AssembleArchLookupSwitch(Instruction* instr);
+ void AssembleArchTableSwitch(Instruction* instr);
- void AssembleDeoptimizerCall(int deoptimization_id);
+ void AssembleDeoptimizerCall(int deoptimization_id,
+ Deoptimizer::BailoutType bailout_type);
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
@@ -81,37 +109,70 @@
// to tear down a stack frame.
void AssembleReturn();
+ // Generates code to deconstruct the caller's frame, including arguments.
+ void AssembleDeconstructActivationRecord(int stack_param_delta);
+
+ // Generates code to manipulate the stack in preparation for a tail call.
+ void AssemblePrepareTailCall(int stack_param_delta);
+
// ===========================================================================
// ============== Architecture-specific gap resolver methods. ================
// ===========================================================================
// Interface used by the gap resolver to emit moves and swaps.
void AssembleMove(InstructionOperand* source,
- InstructionOperand* destination) FINAL;
+ InstructionOperand* destination) final;
void AssembleSwap(InstructionOperand* source,
- InstructionOperand* destination) FINAL;
+ InstructionOperand* destination) final;
// ===========================================================================
- // Deoptimization table construction
- void AddSafepointAndDeopt(Instruction* instr);
+ // =================== Jump table construction methods. ======================
+ // ===========================================================================
+
+ class JumpTable;
+ // Adds a jump table that is emitted after the actual code. Returns a label
+ // pointing to the beginning of the table. {targets} is assumed to be static
+ // or zone allocated.
+ Label* AddJumpTable(Label** targets, size_t target_count);
+ // Emits a jump table.
+ void AssembleJumpTable(Label** targets, size_t target_count);
+
+ // ===========================================================================
+ // ================== Deoptimization table construction. =====================
+ // ===========================================================================
+
+ void RecordCallPosition(Instruction* instr);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
- FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
- size_t frame_state_offset);
+ FrameStateDescriptor* GetFrameStateDescriptor(
+ Instruction* instr, size_t frame_state_offset);
int BuildTranslation(Instruction* instr, int pc_offset,
size_t frame_state_offset,
OutputFrameStateCombine state_combine);
void BuildTranslationForFrameStateDescriptor(
- FrameStateDescriptor* descriptor, Instruction* instr,
- Translation* translation, size_t frame_state_offset,
- OutputFrameStateCombine state_combine);
+ FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
+ Translation* translation, OutputFrameStateCombine state_combine);
+ void TranslateStateValueDescriptor(StateValueDescriptor* desc,
+ Translation* translation,
+ InstructionOperandIterator* iter);
+ void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
+ InstructionOperandIterator* iter,
+ OutputFrameStateCombine combine,
+ Translation* translation);
void AddTranslationForOperand(Translation* translation, Instruction* instr,
InstructionOperand* op, MachineType type);
void AddNopForSmiCodeInlining();
void EnsureSpaceForLazyDeopt();
void MarkLazyDeoptSite();
+ // Converts the delta in the number of stack parameter passed from a tail
+ // caller to the callee into the distance (in pointers) the SP must be
+ // adjusted, taking frame elision and other relevant factors into
+ // consideration.
+ int TailCallFrameStackSlotDelta(int stack_param_delta);
+
// ===========================================================================
+
struct DeoptimizationState : ZoneObject {
public:
BailoutId bailout_id() const { return bailout_id_; }
@@ -129,23 +190,34 @@
int pc_offset_;
};
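+ // Records a call site that has an exception handler: whether the exception
+ // is caught locally, the handler's entry label, and the pc offset of the
+ // call's return address.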
+ struct HandlerInfo {
+ bool caught_locally;
+ Label* handler;
+ int pc_offset;
+ };
+
friend class OutOfLineCode;
- Frame* const frame_;
+ FrameAccessState* frame_access_state_;
Linkage* const linkage_;
InstructionSequence* const code_;
CompilationInfo* const info_;
Label* const labels_;
- BasicBlock::RpoNumber current_block_;
+ Label return_label_;
+ RpoNumber current_block_;
SourcePosition current_source_position_;
MacroAssembler masm_;
GapResolver resolver_;
SafepointTableBuilder safepoints_;
+ ZoneVector<HandlerInfo> handlers_;
ZoneDeque<DeoptimizationState*> deoptimization_states_;
- ZoneDeque<Handle<Object> > deoptimization_literals_;
+ ZoneDeque<Handle<Object>> deoptimization_literals_;
+ size_t inlined_function_count_;
TranslationBuffer translations_;
int last_lazy_deopt_pc_;
+ JumpTable* jump_tables_;
OutOfLineCode* ools_;
+ int osr_pc_offset_;
};
} // namespace compiler
diff --git a/src/compiler/code-stub-assembler.cc b/src/compiler/code-stub-assembler.cc
new file mode 100644
index 0000000..b2a05b6
--- /dev/null
+++ b/src/compiler/code-stub-assembler.cc
@@ -0,0 +1,176 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-stub-assembler.h"
+
+#include <ostream>
+
+#include "src/code-factory.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/schedule.h"
+#include "src/frames.h"
+#include "src/interface-descriptors.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/machine-type.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor,
+ Code::Kind kind, const char* name)
+ : raw_assembler_(new RawMachineAssembler(
+ isolate, new (zone) Graph(zone),
+ Linkage::GetStubCallDescriptor(isolate, zone, descriptor, 0,
+ CallDescriptor::kNoFlags))),
+ kind_(kind),
+ name_(name),
+ code_generated_(false) {}
+
+
+CodeStubAssembler::~CodeStubAssembler() {}
+
+
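+// Illustrative use only (a sketch for exposition, not part of this change;
+// the stub name and {descriptor} are assumed, with one Smi parameter):
+//
+//   CodeStubAssembler a(isolate, zone, descriptor, Code::STUB, "AddOneStub");
+//   Node* x = a.SmiUntag(a.Parameter(0));
+//   a.Return(a.SmiTag(a.IntPtrAdd(x, a.IntPtrConstant(1))));
+//   Handle<Code> code = a.GenerateCode();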
+Handle<Code> CodeStubAssembler::GenerateCode() {
+ DCHECK(!code_generated_);
+
+ Schedule* schedule = raw_assembler_->Export();
+ Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
+ isolate(), raw_assembler_->call_descriptor(), graph(), schedule, kind_,
+ name_);
+
+ code_generated_ = true;
+ return code;
+}
+
+
+Node* CodeStubAssembler::Int32Constant(int value) {
+ return raw_assembler_->Int32Constant(value);
+}
+
+
+Node* CodeStubAssembler::IntPtrConstant(intptr_t value) {
+ return raw_assembler_->IntPtrConstant(value);
+}
+
+
+Node* CodeStubAssembler::NumberConstant(double value) {
+ return raw_assembler_->NumberConstant(value);
+}
+
+
+Node* CodeStubAssembler::HeapConstant(Handle<HeapObject> object) {
+ return raw_assembler_->HeapConstant(object);
+}
+
+
+Node* CodeStubAssembler::BooleanConstant(bool value) {
+ return raw_assembler_->BooleanConstant(value);
+}
+
+
+Node* CodeStubAssembler::Parameter(int value) {
+ return raw_assembler_->Parameter(value);
+}
+
+
+void CodeStubAssembler::Return(Node* value) {
+ return raw_assembler_->Return(value);
+}
+
+
+Node* CodeStubAssembler::SmiShiftBitsConstant() {
+ return Int32Constant(kSmiShiftSize + kSmiTagSize);
+}
+
+
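+// A Smi is represented as the value shifted left by
+// kSmiShiftSize + kSmiTagSize, leaving zero tag bits at the low end.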
+Node* CodeStubAssembler::SmiTag(Node* value) {
+ return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
+}
+
+
+Node* CodeStubAssembler::SmiUntag(Node* value) {
+ return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
+}
+
+
+Node* CodeStubAssembler::IntPtrAdd(Node* a, Node* b) {
+ return raw_assembler_->IntPtrAdd(a, b);
+}
+
+
+Node* CodeStubAssembler::IntPtrSub(Node* a, Node* b) {
+ return raw_assembler_->IntPtrSub(a, b);
+}
+
+
+Node* CodeStubAssembler::WordShl(Node* value, int shift) {
+ return raw_assembler_->WordShl(value, Int32Constant(shift));
+}
+
+
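+// Field offsets are relative to the tagged object pointer, so kHeapObjectTag
+// is subtracted to form the untagged machine address.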
+Node* CodeStubAssembler::LoadObjectField(Node* object, int offset) {
+ return raw_assembler_->Load(MachineType::AnyTagged(), object,
+ IntPtrConstant(offset - kHeapObjectTag));
+}
+
+
+Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
+ Node** args) {
+ return raw_assembler_->CallN(descriptor, code_target, args);
+}
+
+
+Node* CodeStubAssembler::TailCallN(CallDescriptor* descriptor,
+ Node* code_target, Node** args) {
+ return raw_assembler_->TailCallN(descriptor, code_target, args);
+}
+
+
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1) {
+ return raw_assembler_->CallRuntime1(function_id, arg1, context);
+}
+
+
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2) {
+ return raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
+}
+
+
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1) {
+ return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
+}
+
+
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1,
+ Node* arg2) {
+ return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
+}
+
+
+// RawMachineAssembler delegate helpers:
+Isolate* CodeStubAssembler::isolate() { return raw_assembler_->isolate(); }
+
+
+Graph* CodeStubAssembler::graph() { return raw_assembler_->graph(); }
+
+
+Zone* CodeStubAssembler::zone() { return raw_assembler_->zone(); }
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/code-stub-assembler.h b/src/compiler/code-stub-assembler.h
new file mode 100644
index 0000000..3c4ae05
--- /dev/null
+++ b/src/compiler/code-stub-assembler.h
@@ -0,0 +1,96 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_
+#define V8_COMPILER_CODE_STUB_ASSEMBLER_H_
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/allocation.h"
+#include "src/builtins.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+class CallInterfaceDescriptor;
+class Isolate;
+class Zone;
+
+namespace compiler {
+
+class CallDescriptor;
+class Graph;
+class Node;
+class Operator;
+class RawMachineAssembler;
+class Schedule;
+
+class CodeStubAssembler {
+ public:
+ CodeStubAssembler(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor, Code::Kind kind,
+ const char* name);
+ virtual ~CodeStubAssembler();
+
+ Handle<Code> GenerateCode();
+
+ // Constants.
+ Node* Int32Constant(int value);
+ Node* IntPtrConstant(intptr_t value);
+ Node* NumberConstant(double value);
+ Node* HeapConstant(Handle<HeapObject> object);
+ Node* BooleanConstant(bool value);
+
+ Node* Parameter(int value);
+ void Return(Node* value);
+
+ // Tag and untag Smi values.
+ Node* SmiTag(Node* value);
+ Node* SmiUntag(Node* value);
+
+ // Basic arithmetic operations.
+ Node* IntPtrAdd(Node* a, Node* b);
+ Node* IntPtrSub(Node* a, Node* b);
+ Node* WordShl(Node* value, int shift);
+
+ // Load a field from an object on the heap.
+ Node* LoadObjectField(Node* object, int offset);
+
+ // Call runtime function.
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2);
+
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2);
+
+ private:
+ friend class CodeStubAssemblerTester;
+
+ Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+ Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+
+ Node* SmiShiftBitsConstant();
+
+ // Private helpers which delegate to RawMachineAssembler.
+ Graph* graph();
+ Isolate* isolate();
+ Zone* zone();
+
+ base::SmartPointer<RawMachineAssembler> raw_assembler_;
+ Code::Kind kind_;
+ const char* name_;
+ bool code_generated_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_
diff --git a/src/compiler/common-node-cache.cc b/src/compiler/common-node-cache.cc
index ee1fa0f..a0ae6e8 100644
--- a/src/compiler/common-node-cache.cc
+++ b/src/compiler/common-node-cache.cc
@@ -5,6 +5,7 @@
#include "src/compiler/common-node-cache.h"
#include "src/assembler.h"
+#include "src/compiler/node.h"
namespace v8 {
namespace internal {
@@ -15,6 +16,11 @@
}
+Node** CommonNodeCache::FindHeapConstant(Handle<HeapObject> value) {
+ return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.location()));
+}
+
+
void CommonNodeCache::GetCachedNodes(ZoneVector<Node*>* nodes) {
int32_constants_.GetCachedNodes(nodes);
int64_constants_.GetCachedNodes(nodes);
@@ -22,6 +28,7 @@
float64_constants_.GetCachedNodes(nodes);
external_constants_.GetCachedNodes(nodes);
number_constants_.GetCachedNodes(nodes);
+ heap_constants_.GetCachedNodes(nodes);
}
} // namespace compiler
diff --git a/src/compiler/common-node-cache.h b/src/compiler/common-node-cache.h
index 7ec70ae..720bc15 100644
--- a/src/compiler/common-node-cache.h
+++ b/src/compiler/common-node-cache.h
@@ -12,12 +12,15 @@
// Forward declarations.
class ExternalReference;
+class HeapObject;
+template <typename>
+class Handle;
namespace compiler {
// Bundles various caches for common nodes.
-class CommonNodeCache FINAL {
+class CommonNodeCache final {
public:
explicit CommonNodeCache(Zone* zone) : zone_(zone) {}
~CommonNodeCache() {}
@@ -47,6 +50,8 @@
return number_constants_.Find(zone(), bit_cast<int64_t>(value));
}
+ Node** FindHeapConstant(Handle<HeapObject> value);
+
// Return all nodes from the cache.
void GetCachedNodes(ZoneVector<Node*>* nodes);
@@ -59,7 +64,8 @@
Int64NodeCache float64_constants_;
IntPtrNodeCache external_constants_;
Int64NodeCache number_constants_;
- Zone* zone_;
+ IntPtrNodeCache heap_constants_;
+ Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
};
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index cf597ea..2334541 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -4,29 +4,354 @@
#include "src/compiler/common-operator-reducer.h"
+#include <algorithm>
+
#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+
+enum class Decision { kUnknown, kTrue, kFalse };
+
+Decision DecideCondition(Node* const cond) {
+ switch (cond->opcode()) {
+ case IrOpcode::kInt32Constant: {
+ Int32Matcher mcond(cond);
+ return mcond.Value() ? Decision::kTrue : Decision::kFalse;
+ }
+ case IrOpcode::kInt64Constant: {
+ Int64Matcher mcond(cond);
+ return mcond.Value() ? Decision::kTrue : Decision::kFalse;
+ }
+ case IrOpcode::kHeapConstant: {
+ HeapObjectMatcher mcond(cond);
+ return mcond.Value()->BooleanValue() ? Decision::kTrue : Decision::kFalse;
+ }
+ default:
+ return Decision::kUnknown;
+ }
+}
+
+} // namespace
+
+
+CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
+ CommonOperatorBuilder* common,
+ MachineOperatorBuilder* machine)
+ : AdvancedReducer(editor),
+ graph_(graph),
+ common_(common),
+ machine_(machine),
+ dead_(graph->NewNode(common->Dead())) {}
+
+
Reduction CommonOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
+ case IrOpcode::kBranch:
+ return ReduceBranch(node);
+ case IrOpcode::kMerge:
+ return ReduceMerge(node);
case IrOpcode::kEffectPhi:
- case IrOpcode::kPhi: {
- int const input_count = node->InputCount();
- if (input_count > 1) {
- Node* const replacement = node->InputAt(0);
- for (int i = 1; i < input_count - 1; ++i) {
- if (node->InputAt(i) != replacement) return NoChange();
+ return ReduceEffectPhi(node);
+ case IrOpcode::kPhi:
+ return ReducePhi(node);
+ case IrOpcode::kReturn:
+ return ReduceReturn(node);
+ case IrOpcode::kSelect:
+ return ReduceSelect(node);
+ case IrOpcode::kGuard:
+ return ReduceGuard(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ Node* const cond = node->InputAt(0);
+ // Swap IfTrue/IfFalse on {branch} if {cond} is a BooleanNot and use the input
+ // to BooleanNot as new condition for {branch}. Note we assume that {cond} was
+ // already properly optimized before we get here (as guaranteed by the graph
+ // reduction logic).
+ if (cond->opcode() == IrOpcode::kBooleanNot) {
+ for (Node* const use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kIfTrue:
+ NodeProperties::ChangeOp(use, common()->IfFalse());
+ break;
+ case IrOpcode::kIfFalse:
+ NodeProperties::ChangeOp(use, common()->IfTrue());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ // Update the condition of {branch}. No need to mark the uses for revisit,
+ // since we tell the graph reducer that the {branch} was changed and the
+ // graph reduction logic will ensure that the uses are revisited properly.
+ node->ReplaceInput(0, cond->InputAt(0));
+ // Negate the hint for {branch}.
+ NodeProperties::ChangeOp(
+ node, common()->Branch(NegateBranchHint(BranchHintOf(node->op()))));
+ return Changed(node);
+ }
+ Decision const decision = DecideCondition(cond);
+ if (decision == Decision::kUnknown) return NoChange();
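+ // The condition is constant: wire the taken projection to the incoming
+ // control and replace the other projection with {Dead}.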
+ Node* const control = node->InputAt(1);
+ for (Node* const use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kIfTrue:
+ Replace(use, (decision == Decision::kTrue) ? control : dead());
+ break;
+ case IrOpcode::kIfFalse:
+ Replace(use, (decision == Decision::kFalse) ? control : dead());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return Replace(dead());
+}
+
+
+Reduction CommonOperatorReducer::ReduceMerge(Node* node) {
+ DCHECK_EQ(IrOpcode::kMerge, node->opcode());
+ //
+ // Check if this is a merge that belongs to an unused diamond, which means
+ // that:
+ //
+ // a) the {Merge} has no {Phi} or {EffectPhi} uses, and
+ // b) the {Merge} has two inputs, one {IfTrue} and one {IfFalse}, which are
+ // both owned by the Merge, and
+ // c) the {IfTrue} and {IfFalse} nodes point to the same {Branch}.
+ //
+ if (node->InputCount() == 2) {
+ for (Node* const use : node->uses()) {
+ if (IrOpcode::IsPhiOpcode(use->opcode())) return NoChange();
+ }
+ Node* if_true = node->InputAt(0);
+ Node* if_false = node->InputAt(1);
+ if (if_true->opcode() != IrOpcode::kIfTrue) std::swap(if_true, if_false);
+ if (if_true->opcode() == IrOpcode::kIfTrue &&
+ if_false->opcode() == IrOpcode::kIfFalse &&
+ if_true->InputAt(0) == if_false->InputAt(0) && if_true->OwnedBy(node) &&
+ if_false->OwnedBy(node)) {
+ Node* const branch = if_true->InputAt(0);
+ DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
+ DCHECK(branch->OwnedBy(if_true, if_false));
+ Node* const control = branch->InputAt(1);
+ // Mark the {branch} as {Dead}.
+ branch->TrimInputCount(0);
+ NodeProperties::ChangeOp(branch, common()->Dead());
+ return Replace(control);
+ }
+ }
+ return NoChange();
+}
+
+
+Reduction CommonOperatorReducer::ReduceEffectPhi(Node* node) {
+ DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
+ int const input_count = node->InputCount() - 1;
+ DCHECK_LE(1, input_count);
+ Node* const merge = node->InputAt(input_count);
+ DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
+ DCHECK_EQ(input_count, merge->InputCount());
+ Node* const effect = node->InputAt(0);
+ DCHECK_NE(node, effect);
+ for (int i = 1; i < input_count; ++i) {
+ Node* const input = node->InputAt(i);
+ if (input == node) {
+ // Ignore redundant inputs.
+ DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
+ continue;
+ }
+ if (input != effect) return NoChange();
+ }
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Replace(effect);
+}
+
+
+Reduction CommonOperatorReducer::ReducePhi(Node* node) {
+ DCHECK_EQ(IrOpcode::kPhi, node->opcode());
+ int const input_count = node->InputCount() - 1;
+ DCHECK_LE(1, input_count);
+ Node* const merge = node->InputAt(input_count);
+ DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
+ DCHECK_EQ(input_count, merge->InputCount());
+ if (input_count == 2) {
+ Node* vtrue = node->InputAt(0);
+ Node* vfalse = node->InputAt(1);
+ Node* if_true = merge->InputAt(0);
+ Node* if_false = merge->InputAt(1);
+ if (if_true->opcode() != IrOpcode::kIfTrue) {
+ std::swap(if_true, if_false);
+ std::swap(vtrue, vfalse);
+ }
+ if (if_true->opcode() == IrOpcode::kIfTrue &&
+ if_false->opcode() == IrOpcode::kIfFalse &&
+ if_true->InputAt(0) == if_false->InputAt(0)) {
+ Node* const branch = if_true->InputAt(0);
+ // Check that the branch is not dead already.
+ if (branch->opcode() != IrOpcode::kBranch) return NoChange();
+ Node* const cond = branch->InputAt(0);
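+ // Recognize diamond patterns over the branch condition: 0 < x ? x : 0 - x
+ // reduces to Abs, x < y ? x : y to Min, and y < x ? x : y to Max.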
+ if (cond->opcode() == IrOpcode::kFloat32LessThan) {
+ Float32BinopMatcher mcond(cond);
+ if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
+ vfalse->opcode() == IrOpcode::kFloat32Sub) {
+ Float32BinopMatcher mvfalse(vfalse);
+ if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Change(node, machine()->Float32Abs(), vtrue);
+ }
}
- return Replace(replacement);
+ if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
+ machine()->Float32Min().IsSupported()) {
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Change(node, machine()->Float32Min().op(), vtrue, vfalse);
+ } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
+ machine()->Float32Max().IsSupported()) {
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Change(node, machine()->Float32Max().op(), vtrue, vfalse);
+ }
+ } else if (cond->opcode() == IrOpcode::kFloat64LessThan) {
+ Float64BinopMatcher mcond(cond);
+ if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
+ vfalse->opcode() == IrOpcode::kFloat64Sub) {
+ Float64BinopMatcher mvfalse(vfalse);
+ if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Change(node, machine()->Float64Abs(), vtrue);
+ }
+ }
+ if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
+ machine()->Float64Min().IsSupported()) {
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Change(node, machine()->Float64Min().op(), vtrue, vfalse);
+ } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
+ machine()->Float64Max().IsSupported()) {
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Change(node, machine()->Float64Max().op(), vtrue, vfalse);
+ }
+ }
+ }
+ }
+ Node* const value = node->InputAt(0);
+ DCHECK_NE(node, value);
+ for (int i = 1; i < input_count; ++i) {
+ Node* const input = node->InputAt(i);
+ if (input == node) {
+ // Ignore redundant inputs.
+ DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
+ continue;
+ }
+ if (input != value) return NoChange();
+ }
+ // We might now be able to further reduce the {merge} node.
+ Revisit(merge);
+ return Replace(value);
+}
+
+
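+// A Return of a Phi/EffectPhi over a Merge is split into one Return per
+// control input of the Merge, each connected directly to {end}; the Merge
+// and the phis die, and every return path ends in its own {Return} node.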
+Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
+ DCHECK_EQ(IrOpcode::kReturn, node->opcode());
+ Node* const value = node->InputAt(0);
+ Node* const effect = node->InputAt(1);
+ Node* const control = node->InputAt(2);
+ if (value->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(value) == control &&
+ effect->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(effect) == control &&
+ control->opcode() == IrOpcode::kMerge) {
+ int const control_input_count = control->InputCount();
+ DCHECK_NE(0, control_input_count);
+ DCHECK_EQ(control_input_count, value->InputCount() - 1);
+ DCHECK_EQ(control_input_count, effect->InputCount() - 1);
+ DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode());
+ DCHECK_NE(0, graph()->end()->InputCount());
+ for (int i = 0; i < control_input_count; ++i) {
+ // Create a new {Return} and connect it to {end}. We don't need to mark
+ // {end} for revisiting, because we mark {node} as {Dead} below, and {node}
+ // was previously connected to {end}, so we know for sure that at some
+ // point the reducer logic will visit {end} again.
+ Node* ret = graph()->NewNode(common()->Return(), value->InputAt(i),
+ effect->InputAt(i), control->InputAt(i));
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
+ }
+ // Mark the merge {control} and return {node} as {Dead}.
+ Replace(control, dead());
+ return Replace(dead());
+ }
+ return NoChange();
+}
+
+
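+// Select(cond, vtrue, vfalse) folds like a diamond Phi: identical arms
+// collapse immediately, a statically known condition picks one arm via
+// DecideCondition, and the same float compare patterns as in ReducePhi
+// above are matched to form Abs/Min/Max.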
+Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
+ DCHECK_EQ(IrOpcode::kSelect, node->opcode());
+ Node* const cond = node->InputAt(0);
+ Node* const vtrue = node->InputAt(1);
+ Node* const vfalse = node->InputAt(2);
+ if (vtrue == vfalse) return Replace(vtrue);
+ switch (DecideCondition(cond)) {
+ case Decision::kTrue:
+ return Replace(vtrue);
+ case Decision::kFalse:
+ return Replace(vfalse);
+ case Decision::kUnknown:
+ break;
+ }
+ switch (cond->opcode()) {
+ case IrOpcode::kFloat32LessThan: {
+ Float32BinopMatcher mcond(cond);
+ if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
+ vfalse->opcode() == IrOpcode::kFloat32Sub) {
+ Float32BinopMatcher mvfalse(vfalse);
+ if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+ return Change(node, machine()->Float32Abs(), vtrue);
+ }
+ }
+ if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
+ machine()->Float32Min().IsSupported()) {
+ return Change(node, machine()->Float32Min().op(), vtrue, vfalse);
+ } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
+ machine()->Float32Max().IsSupported()) {
+ return Change(node, machine()->Float32Max().op(), vtrue, vfalse);
}
break;
}
- case IrOpcode::kSelect: {
- if (node->InputAt(1) == node->InputAt(2)) {
- return Replace(node->InputAt(1));
+ case IrOpcode::kFloat64LessThan: {
+ Float64BinopMatcher mcond(cond);
+ if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
+ vfalse->opcode() == IrOpcode::kFloat64Sub) {
+ Float64BinopMatcher mvfalse(vfalse);
+ if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+ return Change(node, machine()->Float64Abs(), vtrue);
+ }
+ }
+ if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
+ machine()->Float64Min().IsSupported()) {
+ return Change(node, machine()->Float64Min().op(), vtrue, vfalse);
+ } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
+ machine()->Float64Max().IsSupported()) {
+ return Change(node, machine()->Float64Max().op(), vtrue, vfalse);
}
break;
}
@@ -36,6 +361,35 @@
return NoChange();
}
+
+Reduction CommonOperatorReducer::ReduceGuard(Node* node) {
+ DCHECK_EQ(IrOpcode::kGuard, node->opcode());
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetTypeOrAny(input);
+ Type* const guard_type = OpParameter<Type*>(node);
+ if (input_type->Is(guard_type)) return Replace(input);
+ return NoChange();
+}
+
+
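+// The Change() helpers below rewrite {node} in place to the new operator
+// with one or two inputs, so all existing uses of {node} pick up the
+// replacement without any rewiring of use edges.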
+Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
+ Node* a) {
+ node->ReplaceInput(0, a);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+}
+
+
+Reduction CommonOperatorReducer::Change(Node* node, Operator const* op, Node* a,
+ Node* b) {
+ node->ReplaceInput(0, a);
+ node->ReplaceInput(1, b);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/common-operator-reducer.h b/src/compiler/common-operator-reducer.h
index 10543db..7184755 100644
--- a/src/compiler/common-operator-reducer.h
+++ b/src/compiler/common-operator-reducer.h
@@ -11,13 +11,44 @@
namespace internal {
namespace compiler {
-// Performs strength reduction on nodes that have common operators.
-class CommonOperatorReducer FINAL : public Reducer {
- public:
- CommonOperatorReducer() {}
- ~CommonOperatorReducer() FINAL {}
+// Forward declarations.
+class CommonOperatorBuilder;
+class Graph;
+class MachineOperatorBuilder;
+class Operator;
- Reduction Reduce(Node* node) FINAL;
+
+// Performs strength reduction on nodes that have common operators.
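+//
+// Typical wiring (a sketch: the GraphReducer constructor signature is an
+// assumption here; AddReducer/ReduceGraph and the Editor coupling follow the
+// AdvancedReducer interface):
+//
+//   GraphReducer graph_reducer(zone, graph);
+//   CommonOperatorReducer reducer(&graph_reducer, graph, common, machine);
+//   graph_reducer.AddReducer(&reducer);
+//   graph_reducer.ReduceGraph();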
+class CommonOperatorReducer final : public AdvancedReducer {
+ public:
+ CommonOperatorReducer(Editor* editor, Graph* graph,
+ CommonOperatorBuilder* common,
+ MachineOperatorBuilder* machine);
+ ~CommonOperatorReducer() final {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceBranch(Node* node);
+ Reduction ReduceMerge(Node* node);
+ Reduction ReduceEffectPhi(Node* node);
+ Reduction ReducePhi(Node* node);
+ Reduction ReduceReturn(Node* node);
+ Reduction ReduceSelect(Node* node);
+ Reduction ReduceGuard(Node* node);
+
+ Reduction Change(Node* node, Operator const* op, Node* a);
+ Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
+
+ Graph* graph() const { return graph_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ MachineOperatorBuilder* machine() const { return machine_; }
+ Node* dead() const { return dead_; }
+
+ Graph* const graph_;
+ CommonOperatorBuilder* const common_;
+ MachineOperatorBuilder* const machine_;
+ Node* const dead_;
};
} // namespace compiler
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index a6cca45..be77309 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -9,7 +9,7 @@
#include "src/compiler/linkage.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/unique.h"
+#include "src/handles-inl.h"
#include "src/zone.h"
namespace v8 {
@@ -36,8 +36,45 @@
}
+size_t hash_value(DeoptimizeKind kind) { return static_cast<size_t>(kind); }
+
+
+std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ return os << "Eager";
+ case DeoptimizeKind::kSoft:
+ return os << "Soft";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+DeoptimizeKind DeoptimizeKindOf(const Operator* const op) {
+ DCHECK_EQ(IrOpcode::kDeoptimize, op->opcode());
+ return OpParameter<DeoptimizeKind>(op);
+}
+
+
+size_t hash_value(IfExceptionHint hint) { return static_cast<size_t>(hint); }
+
+
+std::ostream& operator<<(std::ostream& os, IfExceptionHint hint) {
+ switch (hint) {
+ case IfExceptionHint::kLocallyCaught:
+ return os << "Caught";
+ case IfExceptionHint::kLocallyUncaught:
+ return os << "Uncaught";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
bool operator==(SelectParameters const& lhs, SelectParameters const& rhs) {
- return lhs.type() == rhs.type() && lhs.hint() == rhs.hint();
+ return lhs.representation() == rhs.representation() &&
+ lhs.hint() == rhs.hint();
}
@@ -47,12 +84,12 @@
size_t hash_value(SelectParameters const& p) {
- return base::hash_combine(p.type(), p.hint());
+ return base::hash_combine(p.representation(), p.hint());
}
std::ostream& operator<<(std::ostream& os, SelectParameters const& p) {
- return os << p.type() << "|" << p.hint();
+ return os << p.representation() << "|" << p.hint();
}
@@ -62,54 +99,88 @@
}
-size_t hash_value(OutputFrameStateCombine const& sc) {
- return base::hash_combine(sc.kind_, sc.parameter_);
+size_t ProjectionIndexOf(const Operator* const op) {
+ DCHECK_EQ(IrOpcode::kProjection, op->opcode());
+ return OpParameter<size_t>(op);
}
-std::ostream& operator<<(std::ostream& os, OutputFrameStateCombine const& sc) {
- switch (sc.kind_) {
- case OutputFrameStateCombine::kPushOutput:
- if (sc.parameter_ == 0) return os << "Ignore";
- return os << "Push(" << sc.parameter_ << ")";
- case OutputFrameStateCombine::kPokeAt:
- return os << "PokeAt(" << sc.parameter_ << ")";
- }
- UNREACHABLE();
- return os;
+MachineRepresentation PhiRepresentationOf(const Operator* const op) {
+ DCHECK_EQ(IrOpcode::kPhi, op->opcode());
+ return OpParameter<MachineRepresentation>(op);
}
-bool operator==(FrameStateCallInfo const& lhs, FrameStateCallInfo const& rhs) {
- return lhs.type() == rhs.type() && lhs.bailout_id() == rhs.bailout_id() &&
- lhs.state_combine() == rhs.state_combine();
+int ParameterIndexOf(const Operator* const op) {
+ DCHECK_EQ(IrOpcode::kParameter, op->opcode());
+ return OpParameter<ParameterInfo>(op).index();
}
-bool operator!=(FrameStateCallInfo const& lhs, FrameStateCallInfo const& rhs) {
+const ParameterInfo& ParameterInfoOf(const Operator* const op) {
+ DCHECK_EQ(IrOpcode::kParameter, op->opcode());
+ return OpParameter<ParameterInfo>(op);
+}
+
+
+bool operator==(ParameterInfo const& lhs, ParameterInfo const& rhs) {
+ return lhs.index() == rhs.index();
+}
+
+
+bool operator!=(ParameterInfo const& lhs, ParameterInfo const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(FrameStateCallInfo const& info) {
- return base::hash_combine(info.type(), info.bailout_id(),
- info.state_combine());
+size_t hash_value(ParameterInfo const& p) { return p.index(); }
+
+
+std::ostream& operator<<(std::ostream& os, ParameterInfo const& i) {
+ if (i.debug_name()) os << i.debug_name() << '#';
+ os << i.index();
+ return os;
}
-std::ostream& operator<<(std::ostream& os, FrameStateCallInfo const& info) {
- return os << info.type() << ", " << info.bailout_id() << ", "
- << info.state_combine();
-}
+#define CACHED_OP_LIST(V) \
+ V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
+ V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1) \
+ V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
+ V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
+ V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
+ V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0) \
+ V(FinishRegion, Operator::kNoThrow, 1, 1, 0, 1, 1, 0)
-#define CACHED_OP_LIST(V) \
- V(Dead, Operator::kFoldable, 0, 0, 0, 1) \
- V(End, Operator::kFoldable, 0, 0, 1, 0) \
- V(IfTrue, Operator::kFoldable, 0, 0, 1, 1) \
- V(IfFalse, Operator::kFoldable, 0, 0, 1, 1) \
- V(Throw, Operator::kFoldable, 1, 1, 1, 1) \
- V(Return, Operator::kNoProperties, 1, 1, 1, 1)
+#define CACHED_RETURN_LIST(V) \
+ V(1) \
+ V(2) \
+ V(3)
+
+
+#define CACHED_END_LIST(V) \
+ V(1) \
+ V(2) \
+ V(3) \
+ V(4) \
+ V(5) \
+ V(6) \
+ V(7) \
+ V(8)
+
+
+#define CACHED_EFFECT_PHI_LIST(V) \
+ V(1) \
+ V(2) \
+ V(3) \
+ V(4) \
+ V(5) \
+ V(6)
#define CACHED_LOOP_LIST(V) \
@@ -138,39 +209,138 @@
V(6)
-struct CommonOperatorGlobalCache FINAL {
-#define CACHED(Name, properties, value_input_count, effect_input_count, \
- control_input_count, control_output_count) \
- struct Name##Operator FINAL : public Operator { \
- Name##Operator() \
- : Operator(IrOpcode::k##Name, properties, #Name, value_input_count, \
- effect_input_count, control_input_count, 0, 0, \
- control_output_count) {} \
- }; \
+#define CACHED_PHI_LIST(V) \
+ V(kTagged, 1) \
+ V(kTagged, 2) \
+ V(kTagged, 3) \
+ V(kTagged, 4) \
+ V(kTagged, 5) \
+ V(kTagged, 6) \
+ V(kBit, 2) \
+ V(kFloat64, 2) \
+ V(kWord32, 2)
+
+
+#define CACHED_PROJECTION_LIST(V) \
+ V(0) \
+ V(1)
+
+
+#define CACHED_STATE_VALUES_LIST(V) \
+ V(0) \
+ V(1) \
+ V(2) \
+ V(3) \
+ V(4) \
+ V(5) \
+ V(6) \
+ V(7) \
+ V(8) \
+ V(10) \
+ V(11) \
+ V(12) \
+ V(13) \
+ V(14)
+
+
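+// All cached operators are allocated exactly once in this global cache, so
+// CommonOperatorBuilder can hand out shared singleton instances for the
+// common cases listed above and only falls back to zone allocation for
+// uncached shapes.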
+struct CommonOperatorGlobalCache final {
+#define CACHED(Name, properties, value_input_count, effect_input_count, \
+ control_input_count, value_output_count, effect_output_count, \
+ control_output_count) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, properties, #Name, value_input_count, \
+ effect_input_count, control_input_count, \
+ value_output_count, effect_output_count, \
+ control_output_count) {} \
+ }; \
Name##Operator k##Name##Operator;
CACHED_OP_LIST(CACHED)
#undef CACHED
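+ // Parameterized operators are cached via templates below, one singleton
+ // per parameter value (e.g. one Deoptimize operator per DeoptimizeKind).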
+ template <DeoptimizeKind kKind>
+ struct DeoptimizeOperator final : public Operator1<DeoptimizeKind> {
+ DeoptimizeOperator()
+ : Operator1<DeoptimizeKind>( // --
+ IrOpcode::kDeoptimize, Operator::kNoThrow, // opcode
+ "Deoptimize", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ kKind) {} // parameter
+ };
+ DeoptimizeOperator<DeoptimizeKind::kEager> kDeoptimizeEagerOperator;
+ DeoptimizeOperator<DeoptimizeKind::kSoft> kDeoptimizeSoftOperator;
+
+ template <IfExceptionHint kCaughtLocally>
+ struct IfExceptionOperator final : public Operator1<IfExceptionHint> {
+ IfExceptionOperator()
+ : Operator1<IfExceptionHint>( // --
+ IrOpcode::kIfException, Operator::kKontrol, // opcode
+ "IfException", // name
+ 0, 1, 1, 1, 1, 1, // counts
+ kCaughtLocally) {} // parameter
+ };
+ IfExceptionOperator<IfExceptionHint::kLocallyCaught> kIfExceptionCOperator;
+ IfExceptionOperator<IfExceptionHint::kLocallyUncaught> kIfExceptionUOperator;
+
+ template <size_t kInputCount>
+ struct EndOperator final : public Operator {
+ EndOperator()
+ : Operator( // --
+ IrOpcode::kEnd, Operator::kKontrol, // opcode
+ "End", // name
+ 0, 0, kInputCount, 0, 0, 0) {} // counts
+ };
+#define CACHED_END(input_count) \
+ EndOperator<input_count> kEnd##input_count##Operator;
+ CACHED_END_LIST(CACHED_END)
+#undef CACHED_END
+
+ template <size_t kInputCount>
+ struct ReturnOperator final : public Operator {
+ ReturnOperator()
+ : Operator( // --
+ IrOpcode::kReturn, Operator::kNoThrow, // opcode
+ "Return", // name
+ kInputCount, 1, 1, 0, 0, 1) {} // counts
+ };
+#define CACHED_RETURN(input_count) \
+ ReturnOperator<input_count> kReturn##input_count##Operator;
+ CACHED_RETURN_LIST(CACHED_RETURN)
+#undef CACHED_RETURN
+
template <BranchHint kBranchHint>
- struct BranchOperator FINAL : public Operator1<BranchHint> {
+ struct BranchOperator final : public Operator1<BranchHint> {
BranchOperator()
- : Operator1<BranchHint>( // --
- IrOpcode::kBranch, Operator::kFoldable, // opcode
- "Branch", // name
- 1, 0, 1, 0, 0, 2, // counts
- kBranchHint) {} // parameter
+ : Operator1<BranchHint>( // --
+ IrOpcode::kBranch, Operator::kKontrol, // opcode
+ "Branch", // name
+ 1, 0, 1, 0, 0, 2, // counts
+ kBranchHint) {} // parameter
};
BranchOperator<BranchHint::kNone> kBranchNoneOperator;
BranchOperator<BranchHint::kTrue> kBranchTrueOperator;
BranchOperator<BranchHint::kFalse> kBranchFalseOperator;
+ template <int kEffectInputCount>
+ struct EffectPhiOperator final : public Operator {
+ EffectPhiOperator()
+ : Operator( // --
+ IrOpcode::kEffectPhi, Operator::kPure, // opcode
+ "EffectPhi", // name
+ 0, kEffectInputCount, 1, 0, 1, 0) {} // counts
+ };
+#define CACHED_EFFECT_PHI(input_count) \
+ EffectPhiOperator<input_count> kEffectPhi##input_count##Operator;
+ CACHED_EFFECT_PHI_LIST(CACHED_EFFECT_PHI)
+#undef CACHED_EFFECT_PHI
+
template <size_t kInputCount>
- struct LoopOperator FINAL : public Operator {
+ struct LoopOperator final : public Operator {
LoopOperator()
- : Operator( // --
- IrOpcode::kLoop, Operator::kFoldable, // opcode
- "Loop", // name
- 0, 0, kInputCount, 0, 0, 1) {} // counts
+ : Operator( // --
+ IrOpcode::kLoop, Operator::kKontrol, // opcode
+ "Loop", // name
+ 0, 0, kInputCount, 0, 0, 1) {} // counts
};
#define CACHED_LOOP(input_count) \
LoopOperator<input_count> kLoop##input_count##Operator;
@@ -178,31 +348,75 @@
#undef CACHED_LOOP
template <size_t kInputCount>
- struct MergeOperator FINAL : public Operator {
+ struct MergeOperator final : public Operator {
MergeOperator()
- : Operator( // --
- IrOpcode::kMerge, Operator::kFoldable, // opcode
- "Merge", // name
- 0, 0, kInputCount, 0, 0, 1) {} // counts
+ : Operator( // --
+ IrOpcode::kMerge, Operator::kKontrol, // opcode
+ "Merge", // name
+ 0, 0, kInputCount, 0, 0, 1) {} // counts
};
#define CACHED_MERGE(input_count) \
MergeOperator<input_count> kMerge##input_count##Operator;
CACHED_MERGE_LIST(CACHED_MERGE)
#undef CACHED_MERGE
+ template <MachineRepresentation kRep, int kInputCount>
+ struct PhiOperator final : public Operator1<MachineRepresentation> {
+ PhiOperator()
+ : Operator1<MachineRepresentation>( // --
+ IrOpcode::kPhi, Operator::kPure, // opcode
+ "Phi", // name
+ kInputCount, 0, 1, 1, 0, 0, // counts
+ kRep) {} // parameter
+ };
+#define CACHED_PHI(rep, input_count) \
+ PhiOperator<MachineRepresentation::rep, input_count> \
+ kPhi##rep##input_count##Operator;
+ CACHED_PHI_LIST(CACHED_PHI)
+#undef CACHED_PHI
+
template <int kIndex>
- struct ParameterOperator FINAL : public Operator1<int> {
+ struct ParameterOperator final : public Operator1<ParameterInfo> {
ParameterOperator()
- : Operator1<int>( // --
+ : Operator1<ParameterInfo>( // --
IrOpcode::kParameter, Operator::kPure, // opcode
"Parameter", // name
1, 0, 0, 1, 0, 0, // counts,
- kIndex) {} // parameter
+ ParameterInfo(kIndex, nullptr)) {} // parameter and name
};
#define CACHED_PARAMETER(index) \
ParameterOperator<index> kParameter##index##Operator;
CACHED_PARAMETER_LIST(CACHED_PARAMETER)
#undef CACHED_PARAMETER
+
+ template <size_t kIndex>
+ struct ProjectionOperator final : public Operator1<size_t> {
+ ProjectionOperator()
+ : Operator1<size_t>( // --
+ IrOpcode::kProjection, // opcode
+ Operator::kPure, // flags
+ "Projection", // name
+ 1, 0, 0, 1, 0, 0, // counts,
+ kIndex) {} // parameter
+ };
+#define CACHED_PROJECTION(index) \
+ ProjectionOperator<index> kProjection##index##Operator;
+ CACHED_PROJECTION_LIST(CACHED_PROJECTION)
+#undef CACHED_PROJECTION
+
+ template <int kInputCount>
+ struct StateValuesOperator final : public Operator {
+ StateValuesOperator()
+ : Operator( // --
+ IrOpcode::kStateValues, // opcode
+ Operator::kPure, // flags
+ "StateValues", // name
+ kInputCount, 0, 0, 1, 0, 0) {} // counts
+ };
+#define CACHED_STATE_VALUES(input_count) \
+ StateValuesOperator<input_count> kStateValues##input_count##Operator;
+ CACHED_STATE_VALUES_LIST(CACHED_STATE_VALUES)
+#undef CACHED_STATE_VALUES
};
@@ -214,15 +428,52 @@
: cache_(kCache.Get()), zone_(zone) {}
-#define CACHED(Name, properties, value_input_count, effect_input_count, \
- control_input_count, control_output_count) \
- const Operator* CommonOperatorBuilder::Name() { \
- return &cache_.k##Name##Operator; \
+#define CACHED(Name, properties, value_input_count, effect_input_count, \
+ control_input_count, value_output_count, effect_output_count, \
+ control_output_count) \
+ const Operator* CommonOperatorBuilder::Name() { \
+ return &cache_.k##Name##Operator; \
}
CACHED_OP_LIST(CACHED)
#undef CACHED
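+
+// Builder methods below return the cached singleton when the requested input
+// count is covered by the cache, and fall back to allocating a fresh operator
+// in the graph zone otherwise.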
+const Operator* CommonOperatorBuilder::End(size_t control_input_count) {
+ switch (control_input_count) {
+#define CACHED_END(input_count) \
+ case input_count: \
+ return &cache_.kEnd##input_count##Operator;
+ CACHED_END_LIST(CACHED_END)
+#undef CACHED_END
+ default:
+ break;
+ }
+ // Uncached.
+ return new (zone()) Operator( // --
+ IrOpcode::kEnd, Operator::kKontrol, // opcode
+ "End", // name
+ 0, 0, control_input_count, 0, 0, 0); // counts
+}
+
+
+const Operator* CommonOperatorBuilder::Return(int value_input_count) {
+ switch (value_input_count) {
+#define CACHED_RETURN(input_count) \
+ case input_count: \
+ return &cache_.kReturn##input_count##Operator;
+ CACHED_RETURN_LIST(CACHED_RETURN)
+#undef CACHED_RETURN
+ default:
+ break;
+ }
+ // Uncached.
+ return new (zone()) Operator( // --
+ IrOpcode::kReturn, Operator::kNoThrow, // opcode
+ "Return", // name
+ value_input_count, 1, 1, 0, 0, 1); // counts
+}
+
+
const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
switch (hint) {
case BranchHint::kNone:
@@ -237,9 +488,48 @@
}
-const Operator* CommonOperatorBuilder::Start(int num_formal_parameters) {
- // Outputs are formal parameters, plus context, receiver, and JSFunction.
- const int value_output_count = num_formal_parameters + 3;
+const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind) {
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ return &cache_.kDeoptimizeEagerOperator;
+ case DeoptimizeKind::kSoft:
+ return &cache_.kDeoptimizeSoftOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+const Operator* CommonOperatorBuilder::IfException(IfExceptionHint hint) {
+ switch (hint) {
+ case IfExceptionHint::kLocallyCaught:
+ return &cache_.kIfExceptionCOperator;
+ case IfExceptionHint::kLocallyUncaught:
+ return &cache_.kIfExceptionUOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
+ return new (zone()) Operator( // --
+ IrOpcode::kSwitch, Operator::kKontrol, // opcode
+ "Switch", // name
+ 1, 0, 1, 0, 0, control_output_count); // counts
+}
+
+
+const Operator* CommonOperatorBuilder::IfValue(int32_t index) {
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kIfValue, Operator::kKontrol, // opcode
+ "IfValue", // name
+ 0, 0, 1, 0, 0, 1, // counts
+ index); // parameter
+}
+
+
+const Operator* CommonOperatorBuilder::Start(int value_output_count) {
return new (zone()) Operator( // --
IrOpcode::kStart, Operator::kFoldable, // opcode
"Start", // name
@@ -258,10 +548,10 @@
break;
}
// Uncached.
- return new (zone()) Operator( // --
- IrOpcode::kLoop, Operator::kFoldable, // opcode
- "Loop", // name
- 0, 0, control_input_count, 0, 0, 1); // counts
+ return new (zone()) Operator( // --
+ IrOpcode::kLoop, Operator::kKontrol, // opcode
+ "Loop", // name
+ 0, 0, control_input_count, 0, 0, 1); // counts
}
@@ -276,37 +566,41 @@
break;
}
// Uncached.
- return new (zone()) Operator( // --
- IrOpcode::kMerge, Operator::kFoldable, // opcode
- "Merge", // name
- 0, 0, control_input_count, 0, 0, 1); // counts
+ return new (zone()) Operator( // --
+ IrOpcode::kMerge, Operator::kKontrol, // opcode
+ "Merge", // name
+ 0, 0, control_input_count, 0, 0, 1); // counts
}
-const Operator* CommonOperatorBuilder::Terminate(int effects) {
- return new (zone()) Operator( // --
- IrOpcode::kTerminate, Operator::kPure, // opcode
- "Terminate", // name
- 0, effects, 1, 0, 0, 1); // counts
-}
-
-
-const Operator* CommonOperatorBuilder::Parameter(int index) {
- switch (index) {
+const Operator* CommonOperatorBuilder::Parameter(int index,
+ const char* debug_name) {
+ if (!debug_name) {
+ switch (index) {
#define CACHED_PARAMETER(index) \
case index: \
return &cache_.kParameter##index##Operator;
- CACHED_PARAMETER_LIST(CACHED_PARAMETER)
+ CACHED_PARAMETER_LIST(CACHED_PARAMETER)
#undef CACHED_PARAMETER
- default:
- break;
+ default:
+ break;
+ }
}
// Uncached.
- return new (zone()) Operator1<int>( // --
- IrOpcode::kParameter, Operator::kPure, // opcode
- "Parameter", // name
- 1, 0, 0, 1, 0, 0, // counts
- index); // parameter
+ return new (zone()) Operator1<ParameterInfo>( // --
+ IrOpcode::kParameter, Operator::kPure, // opcode
+ "Parameter", // name
+ 1, 0, 0, 1, 0, 0, // counts
+ ParameterInfo(index, debug_name)); // parameter info
+}
+
+
+const Operator* CommonOperatorBuilder::OsrValue(int index) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kOsrValue, Operator::kNoProperties, // opcode
+ "OsrValue", // name
+ 0, 0, 1, 1, 0, 0, // counts
+ index); // parameter
}
@@ -329,22 +623,20 @@
const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) {
- return new (zone())
- Operator1<float, base::bit_equal_to<float>, base::bit_hash<float>>( // --
- IrOpcode::kFloat32Constant, Operator::kPure, // opcode
- "Float32Constant", // name
- 0, 0, 0, 1, 0, 0, // counts
- value); // parameter
+ return new (zone()) Operator1<float>( // --
+ IrOpcode::kFloat32Constant, Operator::kPure, // opcode
+ "Float32Constant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
}
const Operator* CommonOperatorBuilder::Float64Constant(volatile double value) {
- return new (zone()) Operator1<double, base::bit_equal_to<double>,
- base::bit_hash<double>>( // --
- IrOpcode::kFloat64Constant, Operator::kPure, // opcode
- "Float64Constant", // name
- 0, 0, 0, 1, 0, 0, // counts
- value); // parameter
+ return new (zone()) Operator1<double>( // --
+ IrOpcode::kFloat64Constant, Operator::kPure, // opcode
+ "Float64Constant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
}
@@ -359,18 +651,17 @@
const Operator* CommonOperatorBuilder::NumberConstant(volatile double value) {
- return new (zone()) Operator1<double, base::bit_equal_to<double>,
- base::bit_hash<double>>( // --
- IrOpcode::kNumberConstant, Operator::kPure, // opcode
- "NumberConstant", // name
- 0, 0, 0, 1, 0, 0, // counts
- value); // parameter
+ return new (zone()) Operator1<double>( // --
+ IrOpcode::kNumberConstant, Operator::kPure, // opcode
+ "NumberConstant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
}
const Operator* CommonOperatorBuilder::HeapConstant(
- const Unique<HeapObject>& value) {
- return new (zone()) Operator1<Unique<HeapObject>>( // --
+ const Handle<HeapObject>& value) {
+ return new (zone()) Operator1<Handle<HeapObject>>( // --
IrOpcode::kHeapConstant, Operator::kPure, // opcode
"HeapConstant", // name
0, 0, 0, 1, 0, 0, // counts
@@ -378,54 +669,83 @@
}
-const Operator* CommonOperatorBuilder::Select(MachineType type,
+const Operator* CommonOperatorBuilder::Select(MachineRepresentation rep,
BranchHint hint) {
return new (zone()) Operator1<SelectParameters>( // --
IrOpcode::kSelect, Operator::kPure, // opcode
"Select", // name
3, 0, 0, 1, 0, 0, // counts
- SelectParameters(type, hint)); // parameter
+ SelectParameters(rep, hint)); // parameter
}
-const Operator* CommonOperatorBuilder::Phi(MachineType type, int arguments) {
- DCHECK(arguments > 0); // Disallow empty phis.
- return new (zone()) Operator1<MachineType>( // --
- IrOpcode::kPhi, Operator::kPure, // opcode
- "Phi", // name
- arguments, 0, 1, 1, 0, 0, // counts
- type); // parameter
+const Operator* CommonOperatorBuilder::Phi(MachineRepresentation rep,
+ int value_input_count) {
+ DCHECK(value_input_count > 0); // Disallow empty phis.
+#define CACHED_PHI(kRep, kValueInputCount) \
+ if (MachineRepresentation::kRep == rep && \
+ kValueInputCount == value_input_count) { \
+ return &cache_.kPhi##kRep##kValueInputCount##Operator; \
+ }
+ CACHED_PHI_LIST(CACHED_PHI)
+#undef CACHED_PHI
+ // Uncached.
+ return new (zone()) Operator1<MachineRepresentation>( // --
+ IrOpcode::kPhi, Operator::kPure, // opcode
+ "Phi", // name
+ value_input_count, 0, 1, 1, 0, 0, // counts
+ rep); // parameter
}
-const Operator* CommonOperatorBuilder::EffectPhi(int arguments) {
- DCHECK(arguments > 0); // Disallow empty phis.
+const Operator* CommonOperatorBuilder::EffectPhi(int effect_input_count) {
+ DCHECK(effect_input_count > 0); // Disallow empty effect phis.
+ switch (effect_input_count) {
+#define CACHED_EFFECT_PHI(input_count) \
+ case input_count: \
+ return &cache_.kEffectPhi##input_count##Operator;
+ CACHED_EFFECT_PHI_LIST(CACHED_EFFECT_PHI)
+#undef CACHED_EFFECT_PHI
+ default:
+ break;
+ }
+ // Uncached.
return new (zone()) Operator( // --
IrOpcode::kEffectPhi, Operator::kPure, // opcode
"EffectPhi", // name
- 0, arguments, 1, 0, 1, 0); // counts
+ 0, effect_input_count, 1, 0, 1, 0); // counts
}
-const Operator* CommonOperatorBuilder::ValueEffect(int arguments) {
- DCHECK(arguments > 0); // Disallow empty value effects.
- return new (zone()) Operator( // --
- IrOpcode::kValueEffect, Operator::kPure, // opcode
- "ValueEffect", // name
- arguments, 0, 0, 0, 1, 0); // counts
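+// A {Guard} asserts a static type on its value input; CommonOperatorReducer::
+// ReduceGuard removes it again once the input's inferred type is at least as
+// precise as the guarded type.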
+const Operator* CommonOperatorBuilder::Guard(Type* type) {
+ return new (zone()) Operator1<Type*>( // --
+ IrOpcode::kGuard, Operator::kKontrol, // opcode
+ "Guard", // name
+ 1, 0, 1, 1, 0, 0, // counts
+ type); // parameter
}
-const Operator* CommonOperatorBuilder::Finish(int arguments) {
- DCHECK(arguments > 0); // Disallow empty finishes.
- return new (zone()) Operator( // --
- IrOpcode::kFinish, Operator::kPure, // opcode
- "Finish", // name
- 1, arguments, 0, 1, 0, 0); // counts
+const Operator* CommonOperatorBuilder::EffectSet(int arguments) {
+ DCHECK(arguments > 1); // Disallow empty/singleton sets.
+ return new (zone()) Operator( // --
+ IrOpcode::kEffectSet, Operator::kPure, // opcode
+ "EffectSet", // name
+ 0, arguments, 0, 0, 1, 0); // counts
}
const Operator* CommonOperatorBuilder::StateValues(int arguments) {
+ switch (arguments) {
+#define CACHED_STATE_VALUES(arguments) \
+ case arguments: \
+ return &cache_.kStateValues##arguments##Operator;
+ CACHED_STATE_VALUES_LIST(CACHED_STATE_VALUES)
+#undef CACHED_STATE_VALUES
+ default:
+ break;
+ }
+ // Uncached.
return new (zone()) Operator( // --
IrOpcode::kStateValues, Operator::kPure, // opcode
"StateValues", // name
@@ -433,43 +753,124 @@
}
+const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots, int id) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kObjectState, Operator::kPure, // opcode
+ "ObjectState", // name
+ pointer_slots, 0, 0, 1, 0, 0, id); // counts
+}
+
+
+const Operator* CommonOperatorBuilder::TypedStateValues(
+ const ZoneVector<MachineType>* types) {
+ return new (zone()) Operator1<const ZoneVector<MachineType>*>( // --
+ IrOpcode::kTypedStateValues, Operator::kPure, // opcode
+ "TypedStateValues", // name
+ static_cast<int>(types->size()), 0, 0, 1, 0, 0, types); // counts
+}
+
+
const Operator* CommonOperatorBuilder::FrameState(
- FrameStateType type, BailoutId bailout_id,
- OutputFrameStateCombine state_combine, MaybeHandle<JSFunction> jsfunction) {
- return new (zone()) Operator1<FrameStateCallInfo>( // --
- IrOpcode::kFrameState, Operator::kPure, // opcode
- "FrameState", // name
- 4, 0, 0, 1, 0, 0, // counts
- FrameStateCallInfo(type, bailout_id, state_combine, jsfunction));
+ BailoutId bailout_id, OutputFrameStateCombine state_combine,
+ const FrameStateFunctionInfo* function_info) {
+ FrameStateInfo state_info(bailout_id, state_combine, function_info);
+ return new (zone()) Operator1<FrameStateInfo>( // --
+ IrOpcode::kFrameState, Operator::kPure, // opcode
+ "FrameState", // name
+ 5, 0, 0, 1, 0, 0, // counts
+ state_info); // parameter
}
const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
- class CallOperator FINAL : public Operator1<const CallDescriptor*> {
+ class CallOperator final : public Operator1<const CallDescriptor*> {
public:
- CallOperator(const CallDescriptor* descriptor, const char* mnemonic)
+ explicit CallOperator(const CallDescriptor* descriptor)
: Operator1<const CallDescriptor*>(
- IrOpcode::kCall, descriptor->properties(), mnemonic,
+ IrOpcode::kCall, descriptor->properties(), "Call",
descriptor->InputCount() + descriptor->FrameStateCount(),
Operator::ZeroIfPure(descriptor->properties()),
- Operator::ZeroIfPure(descriptor->properties()),
+ Operator::ZeroIfEliminatable(descriptor->properties()),
descriptor->ReturnCount(),
- Operator::ZeroIfPure(descriptor->properties()), 0, descriptor) {}
+ Operator::ZeroIfPure(descriptor->properties()),
+ Operator::ZeroIfNoThrow(descriptor->properties()), descriptor) {}
- void PrintParameter(std::ostream& os) const OVERRIDE {
+ void PrintParameter(std::ostream& os) const override {
os << "[" << *parameter() << "]";
}
};
- return new (zone()) CallOperator(descriptor, "Call");
+ return new (zone()) CallOperator(descriptor);
+}
+
+
+const Operator* CommonOperatorBuilder::LazyBailout() {
+ return Call(Linkage::GetLazyBailoutDescriptor(zone()));
+}
+
+
+const Operator* CommonOperatorBuilder::TailCall(
+ const CallDescriptor* descriptor) {
+ class TailCallOperator final : public Operator1<const CallDescriptor*> {
+ public:
+ explicit TailCallOperator(const CallDescriptor* descriptor)
+ : Operator1<const CallDescriptor*>(
+ IrOpcode::kTailCall, descriptor->properties(), "TailCall",
+ descriptor->InputCount() + descriptor->FrameStateCount(), 1, 1, 0,
+ 0, 1, descriptor) {}
+
+ void PrintParameter(std::ostream& os) const override {
+ os << "[" << *parameter() << "]";
+ }
+ };
+ return new (zone()) TailCallOperator(descriptor);
}
const Operator* CommonOperatorBuilder::Projection(size_t index) {
- return new (zone()) Operator1<size_t>( // --
- IrOpcode::kProjection, Operator::kPure, // opcode
- "Projection", // name
- 1, 0, 0, 1, 0, 0, // counts
- index); // parameter
+ switch (index) {
+#define CACHED_PROJECTION(index) \
+ case index: \
+ return &cache_.kProjection##index##Operator;
+ CACHED_PROJECTION_LIST(CACHED_PROJECTION)
+#undef CACHED_PROJECTION
+ default:
+ break;
+ }
+ // Uncached.
+ return new (zone()) Operator1<size_t>( // --
+ IrOpcode::kProjection, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // flags
+ "Projection", // name
+ 1, 0, 0, 1, 0, 0, // counts
+ index); // parameter
+}
+
+
+const Operator* CommonOperatorBuilder::ResizeMergeOrPhi(const Operator* op,
+ int size) {
+ if (op->opcode() == IrOpcode::kPhi) {
+ return Phi(PhiRepresentationOf(op), size);
+ } else if (op->opcode() == IrOpcode::kEffectPhi) {
+ return EffectPhi(size);
+ } else if (op->opcode() == IrOpcode::kMerge) {
+ return Merge(size);
+ } else if (op->opcode() == IrOpcode::kLoop) {
+ return Loop(size);
+ } else {
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+const FrameStateFunctionInfo*
+CommonOperatorBuilder::CreateFrameStateFunctionInfo(
+ FrameStateType type, int parameter_count, int local_count,
+ Handle<SharedFunctionInfo> shared_info,
+ ContextCallingMode context_calling_mode) {
+ return new (zone()->New(sizeof(FrameStateFunctionInfo)))
+ FrameStateFunctionInfo(type, parameter_count, local_count, shared_info,
+ context_calling_mode);
}
} // namespace compiler
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index af6066b..83cb5b2 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -1,18 +1,23 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_COMMON_OPERATOR_H_
#define V8_COMPILER_COMMON_OPERATOR_H_
-#include "src/compiler/machine-type.h"
-#include "src/unique.h"
+#include "src/compiler/frame-states.h"
+#include "src/machine-type.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
// Forward declarations.
class ExternalReference;
+template <class>
+class TypeImpl;
+struct ZoneTypeConfig;
+typedef TypeImpl<ZoneTypeConfig> Type;
namespace compiler {
@@ -26,6 +31,19 @@
// Prediction hint for branches.
enum class BranchHint : uint8_t { kNone, kTrue, kFalse };
+inline BranchHint NegateBranchHint(BranchHint hint) {
+ switch (hint) {
+ case BranchHint::kNone:
+ return hint;
+ case BranchHint::kTrue:
+ return BranchHint::kFalse;
+ case BranchHint::kFalse:
+ return BranchHint::kTrue;
+ }
+ UNREACHABLE();
+ return hint;
+}
+
inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
std::ostream& operator<<(std::ostream&, BranchHint);
@@ -33,17 +51,35 @@
BranchHint BranchHintOf(const Operator* const);
-class SelectParameters FINAL {
- public:
- explicit SelectParameters(MachineType type,
- BranchHint hint = BranchHint::kNone)
- : type_(type), hint_(hint) {}
+// Deoptimize bailout kind.
+enum class DeoptimizeKind : uint8_t { kEager, kSoft };
- MachineType type() const { return type_; }
+size_t hash_value(DeoptimizeKind kind);
+
+std::ostream& operator<<(std::ostream&, DeoptimizeKind);
+
+DeoptimizeKind DeoptimizeKindOf(const Operator* const);
+
+
+// Prediction whether throw-site is surrounded by any local catch-scope.
+enum class IfExceptionHint { kLocallyUncaught, kLocallyCaught };
+
+size_t hash_value(IfExceptionHint hint);
+
+std::ostream& operator<<(std::ostream&, IfExceptionHint);
+
+
+class SelectParameters final {
+ public:
+ explicit SelectParameters(MachineRepresentation representation,
+ BranchHint hint = BranchHint::kNone)
+ : representation_(representation), hint_(hint) {}
+
+ MachineRepresentation representation() const { return representation_; }
BranchHint hint() const { return hint_; }
private:
- const MachineType type_;
+ const MachineRepresentation representation_;
const BranchHint hint_;
};
@@ -57,121 +93,61 @@
SelectParameters const& SelectParametersOf(const Operator* const);
-// Flag that describes how to combine the current environment with
-// the output of a node to obtain a framestate for lazy bailout.
-class OutputFrameStateCombine {
+size_t ProjectionIndexOf(const Operator* const);
+
+MachineRepresentation PhiRepresentationOf(const Operator* const);
+
+
+// The {IrOpcode::kParameter} opcode represents an incoming parameter to the
+// function. This class bundles the index and a debug name for such operators.
+class ParameterInfo final {
public:
- enum Kind {
- kPushOutput, // Push the output on the expression stack.
- kPokeAt // Poke at the given environment location,
- // counting from the top of the stack.
- };
+ ParameterInfo(int index, const char* debug_name)
+ : index_(index), debug_name_(debug_name) {}
- static OutputFrameStateCombine Ignore() {
- return OutputFrameStateCombine(kPushOutput, 0);
- }
- static OutputFrameStateCombine Push(size_t count = 1) {
- return OutputFrameStateCombine(kPushOutput, count);
- }
- static OutputFrameStateCombine PokeAt(size_t index) {
- return OutputFrameStateCombine(kPokeAt, index);
- }
-
- Kind kind() const { return kind_; }
- size_t GetPushCount() const {
- DCHECK_EQ(kPushOutput, kind());
- return parameter_;
- }
- size_t GetOffsetToPokeAt() const {
- DCHECK_EQ(kPokeAt, kind());
- return parameter_;
- }
-
- bool IsOutputIgnored() const {
- return kind_ == kPushOutput && parameter_ == 0;
- }
-
- size_t ConsumedOutputCount() const {
- return kind_ == kPushOutput ? GetPushCount() : 1;
- }
-
- bool operator==(OutputFrameStateCombine const& other) const {
- return kind_ == other.kind_ && parameter_ == other.parameter_;
- }
- bool operator!=(OutputFrameStateCombine const& other) const {
- return !(*this == other);
- }
-
- friend size_t hash_value(OutputFrameStateCombine const&);
- friend std::ostream& operator<<(std::ostream&,
- OutputFrameStateCombine const&);
+ int index() const { return index_; }
+ const char* debug_name() const { return debug_name_; }
private:
- OutputFrameStateCombine(Kind kind, size_t parameter)
- : kind_(kind), parameter_(parameter) {}
-
- Kind const kind_;
- size_t const parameter_;
+ int index_;
+ const char* debug_name_;
};
+std::ostream& operator<<(std::ostream&, ParameterInfo const&);
-// The type of stack frame that a FrameState node represents.
-enum FrameStateType {
- JS_FRAME, // Represents an unoptimized JavaScriptFrame.
- ARGUMENTS_ADAPTOR // Represents an ArgumentsAdaptorFrame.
-};
-
-
-class FrameStateCallInfo FINAL {
- public:
- FrameStateCallInfo(
- FrameStateType type, BailoutId bailout_id,
- OutputFrameStateCombine state_combine,
- MaybeHandle<JSFunction> jsfunction = MaybeHandle<JSFunction>())
- : type_(type),
- bailout_id_(bailout_id),
- frame_state_combine_(state_combine),
- jsfunction_(jsfunction) {}
-
- FrameStateType type() const { return type_; }
- BailoutId bailout_id() const { return bailout_id_; }
- OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
- MaybeHandle<JSFunction> jsfunction() const { return jsfunction_; }
-
- private:
- FrameStateType type_;
- BailoutId bailout_id_;
- OutputFrameStateCombine frame_state_combine_;
- MaybeHandle<JSFunction> jsfunction_;
-};
-
-bool operator==(FrameStateCallInfo const&, FrameStateCallInfo const&);
-bool operator!=(FrameStateCallInfo const&, FrameStateCallInfo const&);
-
-size_t hash_value(FrameStateCallInfo const&);
-
-std::ostream& operator<<(std::ostream&, FrameStateCallInfo const&);
+int ParameterIndexOf(const Operator* const);
+const ParameterInfo& ParameterInfoOf(const Operator* const);
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
-class CommonOperatorBuilder FINAL : public ZoneObject {
+class CommonOperatorBuilder final : public ZoneObject {
public:
explicit CommonOperatorBuilder(Zone* zone);
const Operator* Dead();
- const Operator* End();
+ const Operator* End(size_t control_input_count);
const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
+ const Operator* IfSuccess();
+ const Operator* IfException(IfExceptionHint hint);
+ const Operator* Switch(size_t control_output_count);
+ const Operator* IfValue(int32_t value);
+ const Operator* IfDefault();
const Operator* Throw();
- const Operator* Terminate(int effects);
- const Operator* Return();
+ const Operator* Deoptimize(DeoptimizeKind kind);
+ const Operator* Return(int value_input_count = 1);
+ const Operator* Terminate();
- const Operator* Start(int num_formal_parameters);
+ const Operator* Start(int value_output_count);
const Operator* Loop(int control_input_count);
const Operator* Merge(int control_input_count);
- const Operator* Parameter(int index);
+ const Operator* Parameter(int index, const char* debug_name = nullptr);
+
+ const Operator* OsrNormalEntry();
+ const Operator* OsrLoopEntry();
+ const Operator* OsrValue(int index);
const Operator* Int32Constant(int32_t);
const Operator* Int64Constant(int64_t);
@@ -179,20 +155,36 @@
const Operator* Float64Constant(volatile double);
const Operator* ExternalConstant(const ExternalReference&);
const Operator* NumberConstant(volatile double);
- const Operator* HeapConstant(const Unique<HeapObject>&);
+ const Operator* HeapConstant(const Handle<HeapObject>&);
- const Operator* Select(MachineType, BranchHint = BranchHint::kNone);
- const Operator* Phi(MachineType type, int arguments);
- const Operator* EffectPhi(int arguments);
- const Operator* ValueEffect(int arguments);
- const Operator* Finish(int arguments);
+ const Operator* Select(MachineRepresentation, BranchHint = BranchHint::kNone);
+ const Operator* Phi(MachineRepresentation representation,
+ int value_input_count);
+ const Operator* EffectPhi(int effect_input_count);
+ const Operator* EffectSet(int arguments);
+ const Operator* Guard(Type* type);
+ const Operator* BeginRegion();
+ const Operator* FinishRegion();
const Operator* StateValues(int arguments);
- const Operator* FrameState(
- FrameStateType type, BailoutId bailout_id,
- OutputFrameStateCombine state_combine,
- MaybeHandle<JSFunction> jsfunction = MaybeHandle<JSFunction>());
+ const Operator* ObjectState(int pointer_slots, int id);
+ const Operator* TypedStateValues(const ZoneVector<MachineType>* types);
+ const Operator* FrameState(BailoutId bailout_id,
+ OutputFrameStateCombine state_combine,
+ const FrameStateFunctionInfo* function_info);
const Operator* Call(const CallDescriptor* descriptor);
+ const Operator* TailCall(const CallDescriptor* descriptor);
const Operator* Projection(size_t index);
+ const Operator* LazyBailout();
+
+ // Constructs a new merge or phi operator with the same opcode as {op}, but
+ // with {size} inputs.
+ const Operator* ResizeMergeOrPhi(const Operator* op, int size);
+
+ // Constructs the function info needed for frame state construction.
+ const FrameStateFunctionInfo* CreateFrameStateFunctionInfo(
+ FrameStateType type, int parameter_count, int local_count,
+ Handle<SharedFunctionInfo> shared_info,
+ ContextCallingMode context_calling_mode);
private:
Zone* zone() const { return zone_; }
diff --git a/src/compiler/control-builders.cc b/src/compiler/control-builders.cc
index 8725244..6905ef5 100644
--- a/src/compiler/control-builders.cc
+++ b/src/compiler/control-builders.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "control-builders.h"
+#include "src/compiler/control-builders.h"
namespace v8 {
namespace internal {
@@ -32,9 +32,8 @@
}
-void LoopBuilder::BeginLoop(BitVector* assigned) {
- builder_->NewLoop();
- loop_environment_ = environment()->CopyForLoop(assigned);
+void LoopBuilder::BeginLoop(BitVector* assigned, bool is_osr) {
+ loop_environment_ = environment()->CopyForLoop(assigned, is_osr);
continue_environment_ = environment()->CopyAsUnreachable();
break_environment_ = environment()->CopyAsUnreachable();
}
@@ -74,6 +73,16 @@
}
+void LoopBuilder::BreakWhen(Node* condition) {
+ IfBuilder control_if(builder_);
+ control_if.If(condition);
+ control_if.Then();
+ Break();
+ control_if.Else();
+ control_if.End();
+}
+
+
void SwitchBuilder::BeginSwitch() {
body_environment_ = environment()->CopyAsUnreachable();
label_environment_ = environment()->CopyAsUnreachable();
@@ -134,10 +143,90 @@
}
+void BlockBuilder::BreakWhen(Node* condition, BranchHint hint) {
+ IfBuilder control_if(builder_);
+ control_if.If(condition, hint);
+ control_if.Then();
+ Break();
+ control_if.Else();
+ control_if.End();
+}
+
+
+void BlockBuilder::BreakUnless(Node* condition, BranchHint hint) {
+ IfBuilder control_if(builder_);
+ control_if.If(condition, hint);
+ control_if.Then();
+ control_if.Else();
+ Break();
+ control_if.End();
+}
+
+
void BlockBuilder::EndBlock() {
break_environment_->Merge(environment());
set_environment(break_environment_);
}
+
+
+void TryCatchBuilder::BeginTry() {
+ exit_environment_ = environment()->CopyAsUnreachable();
+ catch_environment_ = environment()->CopyAsUnreachable();
+ catch_environment_->Push(the_hole());
}
+
+
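+// Records a throw-site: the exception value travels to the 'catch'
+// environment through the extra stack slot pushed in BeginTry, and the
+// current environment becomes unreachable past the throw.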
+void TryCatchBuilder::Throw(Node* exception) {
+ environment()->Push(exception);
+ catch_environment_->Merge(environment());
+ environment()->Pop();
+ environment()->MarkAsUnreachable();
}
-} // namespace v8::internal::compiler
+
+
+void TryCatchBuilder::EndTry() {
+ exit_environment_->Merge(environment());
+ exception_node_ = catch_environment_->Pop();
+ set_environment(catch_environment_);
+}
+
+
+void TryCatchBuilder::EndCatch() {
+ exit_environment_->Merge(environment());
+ set_environment(exit_environment_);
+}
+
+
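+// The 'finally' environment carries two extra stack slots: a dispatch token
+// recording how the 'try' block was left, and the result value associated
+// with that exit. Every path into 'finally' pushes both before merging, and
+// EndTry pops them back out for use inside the 'finally' body.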
+void TryFinallyBuilder::BeginTry() {
+ finally_environment_ = environment()->CopyAsUnreachable();
+ finally_environment_->Push(the_hole());
+ finally_environment_->Push(the_hole());
+}
+
+
+void TryFinallyBuilder::LeaveTry(Node* token, Node* value) {
+ environment()->Push(value);
+ environment()->Push(token);
+ finally_environment_->Merge(environment());
+ environment()->Drop(2);
+}
+
+
+void TryFinallyBuilder::EndTry(Node* fallthrough_token, Node* value) {
+ environment()->Push(value);
+ environment()->Push(fallthrough_token);
+ finally_environment_->Merge(environment());
+ environment()->Drop(2);
+ token_node_ = finally_environment_->Pop();
+ value_node_ = finally_environment_->Pop();
+ set_environment(finally_environment_);
+}
+
+
+void TryFinallyBuilder::EndFinally() {
+ // Nothing to be done here.
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/control-builders.h b/src/compiler/control-builders.h
index 11adfdb..6ff00be 100644
--- a/src/compiler/control-builders.h
+++ b/src/compiler/control-builders.h
@@ -5,9 +5,7 @@
#ifndef V8_COMPILER_CONTROL_BUILDERS_H_
#define V8_COMPILER_CONTROL_BUILDERS_H_
-#include "src/v8.h"
-
-#include "src/compiler/graph-builder.h"
+#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/node.h"
namespace v8 {
@@ -15,37 +13,36 @@
namespace compiler {
// Base class for all control builders. Also provides a common interface for
-// control builders to handle 'break' and 'continue' statements when they are
-// used to model breakable statements.
+// control builders to handle 'break' statements when they are used to model
+// breakable statements.
class ControlBuilder {
public:
- explicit ControlBuilder(StructuredGraphBuilder* builder)
- : builder_(builder) {}
+ explicit ControlBuilder(AstGraphBuilder* builder) : builder_(builder) {}
virtual ~ControlBuilder() {}
- // Interface for break and continue.
+ // Interface for break.
virtual void Break() { UNREACHABLE(); }
- virtual void Continue() { UNREACHABLE(); }
protected:
- typedef StructuredGraphBuilder Builder;
- typedef StructuredGraphBuilder::Environment Environment;
+ typedef AstGraphBuilder Builder;
+ typedef AstGraphBuilder::Environment Environment;
Zone* zone() const { return builder_->local_zone(); }
Environment* environment() { return builder_->environment(); }
void set_environment(Environment* env) { builder_->set_environment(env); }
+ Node* the_hole() const { return builder_->jsgraph()->TheHoleConstant(); }
Builder* builder_;
};
// Tracks control flow for a conditional statement.
-class IfBuilder FINAL : public ControlBuilder {
+class IfBuilder final : public ControlBuilder {
public:
- explicit IfBuilder(StructuredGraphBuilder* builder)
+ explicit IfBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
- then_environment_(NULL),
- else_environment_(NULL) {}
+ then_environment_(nullptr),
+ else_environment_(nullptr) {}
// Primitive control commands.
void If(Node* condition, BranchHint hint = BranchHint::kNone);
@@ -60,25 +57,26 @@
// Tracks control flow for an iteration statement.
-class LoopBuilder FINAL : public ControlBuilder {
+class LoopBuilder final : public ControlBuilder {
public:
- explicit LoopBuilder(StructuredGraphBuilder* builder)
+ explicit LoopBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
- loop_environment_(NULL),
- continue_environment_(NULL),
- break_environment_(NULL) {}
+ loop_environment_(nullptr),
+ continue_environment_(nullptr),
+ break_environment_(nullptr) {}
// Primitive control commands.
- void BeginLoop(BitVector* assigned);
+ void BeginLoop(BitVector* assigned, bool is_osr = false);
+ void Continue();
void EndBody();
void EndLoop();
- // Primitive support for break and continue.
- void Continue() FINAL;
- void Break() FINAL;
+ // Primitive support for break.
+ void Break() final;
- // Compound control command for conditional break.
+ // Compound control commands for conditional break.
void BreakUnless(Node* condition);
+ void BreakWhen(Node* condition);
private:
Environment* loop_environment_; // Environment of the loop header.
@@ -88,13 +86,13 @@
// Tracks control flow for a switch statement.
-class SwitchBuilder FINAL : public ControlBuilder {
+class SwitchBuilder final : public ControlBuilder {
public:
- explicit SwitchBuilder(StructuredGraphBuilder* builder, int case_count)
+ explicit SwitchBuilder(AstGraphBuilder* builder, int case_count)
: ControlBuilder(builder),
- body_environment_(NULL),
- label_environment_(NULL),
- break_environment_(NULL),
+ body_environment_(nullptr),
+ label_environment_(nullptr),
+ break_environment_(nullptr),
body_environments_(case_count, zone()) {}
// Primitive control commands.
@@ -107,7 +105,7 @@
void EndSwitch();
// Primitive support for break.
- void Break() FINAL;
+ void Break() final;
// The number of cases within a switch is statically known.
size_t case_count() const { return body_environments_.size(); }
@@ -121,22 +119,79 @@
// Tracks control flow for a block statement.
-class BlockBuilder FINAL : public ControlBuilder {
+class BlockBuilder final : public ControlBuilder {
public:
- explicit BlockBuilder(StructuredGraphBuilder* builder)
- : ControlBuilder(builder), break_environment_(NULL) {}
+ explicit BlockBuilder(AstGraphBuilder* builder)
+ : ControlBuilder(builder), break_environment_(nullptr) {}
// Primitive control commands.
void BeginBlock();
void EndBlock();
// Primitive support for break.
- void Break() FINAL;
+ void Break() final;
+
+ // Compound control commands for conditional break.
+ void BreakWhen(Node* condition, BranchHint = BranchHint::kNone);
+ void BreakUnless(Node* condition, BranchHint hint = BranchHint::kNone);
private:
Environment* break_environment_; // Environment after the block exits.
};
+
+// Tracks control flow for a try-catch statement.
+class TryCatchBuilder final : public ControlBuilder {
+ public:
+ explicit TryCatchBuilder(AstGraphBuilder* builder)
+ : ControlBuilder(builder),
+ catch_environment_(nullptr),
+ exit_environment_(nullptr),
+ exception_node_(nullptr) {}
+
+ // Primitive control commands.
+ void BeginTry();
+ void Throw(Node* exception);
+ void EndTry();
+ void EndCatch();
+
+ // Returns the exception value inside the 'catch' body.
+ Node* GetExceptionNode() const { return exception_node_; }
+
+ private:
+ Environment* catch_environment_; // Environment for the 'catch' body.
+ Environment* exit_environment_; // Environment after the statement.
+ Node* exception_node_; // Node for exception in 'catch' body.
+};
+
+
+// Tracks control flow for a try-finally statement.
+class TryFinallyBuilder final : public ControlBuilder {
+ public:
+ explicit TryFinallyBuilder(AstGraphBuilder* builder)
+ : ControlBuilder(builder),
+ finally_environment_(nullptr),
+ token_node_(nullptr),
+ value_node_(nullptr) {}
+
+ // Primitive control commands.
+ void BeginTry();
+ void LeaveTry(Node* token, Node* value);
+ void EndTry(Node* token, Node* value);
+ void EndFinally();
+
+ // Returns the dispatch token value inside the 'finally' body.
+ Node* GetDispatchTokenNode() const { return token_node_; }
+
+ // Returns the saved result value inside the 'finally' body.
+ Node* GetResultValueNode() const { return value_node_; }
+
+ private:
+ Environment* finally_environment_; // Environment for the 'finally' body.
+ Node* token_node_; // Node for token in 'finally' body.
+ Node* value_node_; // Node for value in 'finally' body.
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/control-equivalence.cc b/src/compiler/control-equivalence.cc
new file mode 100644
index 0000000..af1a115
--- /dev/null
+++ b/src/compiler/control-equivalence.cc
@@ -0,0 +1,237 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/control-equivalence.h"
+#include "src/compiler/node-properties.h"
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_ceq) PrintF(__VA_ARGS__); \
+ } while (false)
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void ControlEquivalence::Run(Node* exit) {
+ if (GetClass(exit) == kInvalidClass) {
+ DetermineParticipation(exit);
+ RunUndirectedDFS(exit);
+ }
+}
+
+
+// static
+STATIC_CONST_MEMBER_DEFINITION const size_t ControlEquivalence::kInvalidClass;
+
+
+void ControlEquivalence::VisitPre(Node* node) {
+ TRACE("CEQ: Pre-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
+
+ // Dispense a new pre-order number.
+ SetNumber(node, NewDFSNumber());
+ TRACE(" Assigned DFS number is %zu\n", GetNumber(node));
+}
+
+
+void ControlEquivalence::VisitMid(Node* node, DFSDirection direction) {
+ TRACE("CEQ: Mid-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
+ BracketList& blist = GetBracketList(node);
+
+ // Remove brackets pointing to this node [line:19].
+ BracketListDelete(blist, node, direction);
+
+ // Potentially introduce artificial dependency from start to end.
+ if (blist.empty()) {
+ DCHECK_EQ(kInputDirection, direction);
+ VisitBackedge(node, graph_->end(), kInputDirection);
+ }
+
+ // Potentially start a new equivalence class [line:37].
+ BracketListTRACE(blist);
+ Bracket* recent = &blist.back();
+ if (recent->recent_size != blist.size()) {
+ recent->recent_size = blist.size();
+ recent->recent_class = NewClassNumber();
+ }
+
+ // Assign equivalence class to node.
+ SetClass(node, recent->recent_class);
+ TRACE(" Assigned class number is %zu\n", GetClass(node));
+}
+
+
+void ControlEquivalence::VisitPost(Node* node, Node* parent_node,
+ DFSDirection direction) {
+ TRACE("CEQ: Post-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
+ BracketList& blist = GetBracketList(node);
+
+ // Remove brackets pointing to this node [line:19].
+ BracketListDelete(blist, node, direction);
+
+ // Propagate bracket list up the DFS tree [line:13].
+ if (parent_node != nullptr) {
+ BracketList& parent_blist = GetBracketList(parent_node);
+ parent_blist.splice(parent_blist.end(), blist);
+ }
+}
+
+
+void ControlEquivalence::VisitBackedge(Node* from, Node* to,
+ DFSDirection direction) {
+ TRACE("CEQ: Backedge from #%d:%s to #%d:%s\n", from->id(),
+ from->op()->mnemonic(), to->id(), to->op()->mnemonic());
+
+ // Push backedge onto the bracket list [line:25].
+ Bracket bracket = {direction, kInvalidClass, 0, from, to};
+ GetBracketList(from).push_back(bracket);
+}
+
+
+void ControlEquivalence::RunUndirectedDFS(Node* exit) {
+ ZoneStack<DFSStackEntry> stack(zone_);
+ DFSPush(stack, exit, nullptr, kInputDirection);
+ VisitPre(exit);
+
+ while (!stack.empty()) { // Undirected depth-first backwards traversal.
+ DFSStackEntry& entry = stack.top();
+ Node* node = entry.node;
+
+ if (entry.direction == kInputDirection) {
+ if (entry.input != node->input_edges().end()) {
+ Edge edge = *entry.input;
+ Node* input = edge.to();
+ ++(entry.input);
+ if (NodeProperties::IsControlEdge(edge)) {
+ // Visit next control input.
+ if (!GetData(input)->participates) continue;
+ if (GetData(input)->visited) continue;
+ if (GetData(input)->on_stack) {
+ // Found backedge if input is on stack.
+ if (input != entry.parent_node) {
+ VisitBackedge(node, input, kInputDirection);
+ }
+ } else {
+ // Push input onto stack.
+ DFSPush(stack, input, node, kInputDirection);
+ VisitPre(input);
+ }
+ }
+ continue;
+ }
+ if (entry.use != node->use_edges().end()) {
+ // Switch direction to uses.
+ entry.direction = kUseDirection;
+ VisitMid(node, kInputDirection);
+ continue;
+ }
+ }
+
+ if (entry.direction == kUseDirection) {
+ if (entry.use != node->use_edges().end()) {
+ Edge edge = *entry.use;
+ Node* use = edge.from();
+ ++(entry.use);
+ if (NodeProperties::IsControlEdge(edge)) {
+ // Visit next control use.
+ if (!GetData(use)->participates) continue;
+ if (GetData(use)->visited) continue;
+ if (GetData(use)->on_stack) {
+ // Found backedge if use is on stack.
+ if (use != entry.parent_node) {
+ VisitBackedge(node, use, kUseDirection);
+ }
+ } else {
+ // Push use onto stack.
+ DFSPush(stack, use, node, kUseDirection);
+ VisitPre(use);
+ }
+ }
+ continue;
+ }
+ if (entry.input != node->input_edges().end()) {
+ // Switch direction to inputs.
+ entry.direction = kInputDirection;
+ VisitMid(node, kUseDirection);
+ continue;
+ }
+ }
+
+ // Pop node from stack when done with all inputs and uses.
+ DCHECK(entry.input == node->input_edges().end());
+ DCHECK(entry.use == node->use_edges().end());
+ DFSPop(stack, node);
+ VisitPost(node, entry.parent_node, entry.direction);
+ }
+}
+
+void ControlEquivalence::DetermineParticipationEnqueue(ZoneQueue<Node*>& queue,
+ Node* node) {
+ if (!GetData(node)->participates) {
+ GetData(node)->participates = true;
+ queue.push(node);
+ }
+}
+
+
+void ControlEquivalence::DetermineParticipation(Node* exit) {
+ ZoneQueue<Node*> queue(zone_);
+ DetermineParticipationEnqueue(queue, exit);
+ while (!queue.empty()) { // Breadth-first backwards traversal.
+ Node* node = queue.front();
+ queue.pop();
+ int max = NodeProperties::PastControlIndex(node);
+ for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+ DetermineParticipationEnqueue(queue, node->InputAt(i));
+ }
+ }
+}
+
+
+void ControlEquivalence::DFSPush(DFSStack& stack, Node* node, Node* from,
+ DFSDirection dir) {
+ DCHECK(GetData(node)->participates);
+ DCHECK(!GetData(node)->visited);
+ GetData(node)->on_stack = true;
+ Node::InputEdges::iterator input = node->input_edges().begin();
+ Node::UseEdges::iterator use = node->use_edges().begin();
+ stack.push({dir, input, use, from, node});
+}
+
+
+void ControlEquivalence::DFSPop(DFSStack& stack, Node* node) {
+ DCHECK_EQ(stack.top().node, node);
+ GetData(node)->on_stack = false;
+ GetData(node)->visited = true;
+ stack.pop();
+}
+
+
+void ControlEquivalence::BracketListDelete(BracketList& blist, Node* to,
+ DFSDirection direction) {
+ // TODO(mstarzinger): Optimize this to avoid linear search.
+ for (BracketList::iterator i = blist.begin(); i != blist.end(); /*nop*/) {
+ if (i->to == to && i->direction != direction) {
+ TRACE(" BList erased: {%d->%d}\n", i->from->id(), i->to->id());
+ i = blist.erase(i);
+ } else {
+ ++i;
+ }
+ }
+}
+
+
+void ControlEquivalence::BracketListTRACE(BracketList& blist) {
+ if (FLAG_trace_turbo_ceq) {
+ TRACE(" BList: ");
+ for (Bracket bracket : blist) {
+ TRACE("{%d->%d} ", bracket.from->id(), bracket.to->id());
+ }
+ TRACE("\n");
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/control-equivalence.h b/src/compiler/control-equivalence.h
index cca087f..478e48b 100644
--- a/src/compiler/control-equivalence.h
+++ b/src/compiler/control-equivalence.h
@@ -5,11 +5,8 @@
#ifndef V8_COMPILER_CONTROL_EQUIVALENCE_H_
#define V8_COMPILER_CONTROL_EQUIVALENCE_H_
-#include "src/v8.h"
-
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
-#include "src/compiler/node-properties.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -31,7 +28,7 @@
// control regions in linear time" by Johnson, Pearson & Pingali (PLDI94) which
// also contains proofs for the aforementioned equivalence. References to line
// numbers in the algorithm from figure 4 have been added [line:x].
-class ControlEquivalence : public ZoneObject {
+class ControlEquivalence final : public ZoneObject {
public:
ControlEquivalence(Zone* zone, Graph* graph)
: zone_(zone),
@@ -46,15 +43,11 @@
// participate in the next step. Takes O(E) time and O(N) space.
// 2) An undirected depth-first backwards traversal that determines class
// numbers for all participating nodes. Takes O(E) time and O(N) space.
- void Run(Node* exit) {
- if (GetClass(exit) != kInvalidClass) return;
- DetermineParticipation(exit);
- RunUndirectedDFS(exit);
- }
+ void Run(Node* exit);
// Retrieves a previously computed class number.
size_t ClassOf(Node* node) {
- DCHECK(GetClass(node) != kInvalidClass);
+ DCHECK_NE(kInvalidClass, GetClass(node));
return GetClass(node);
}
@@ -97,65 +90,16 @@
typedef ZoneVector<NodeData> Data;
// Called at pre-visit during DFS walk.
- void VisitPre(Node* node) {
- Trace("CEQ: Pre-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
-
- // Dispense a new pre-order number.
- SetNumber(node, NewDFSNumber());
- Trace(" Assigned DFS number is %d\n", GetNumber(node));
- }
+ void VisitPre(Node* node);
// Called at mid-visit during DFS walk.
- void VisitMid(Node* node, DFSDirection direction) {
- Trace("CEQ: Mid-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
- BracketList& blist = GetBracketList(node);
-
- // Remove brackets pointing to this node [line:19].
- BracketListDelete(blist, node, direction);
-
- // Potentially introduce artificial dependency from start to end.
- if (blist.empty()) {
- DCHECK_EQ(kInputDirection, direction);
- VisitBackedge(node, graph_->end(), kInputDirection);
- }
-
- // Potentially start a new equivalence class [line:37].
- BracketListTrace(blist);
- Bracket* recent = &blist.back();
- if (recent->recent_size != blist.size()) {
- recent->recent_size = blist.size();
- recent->recent_class = NewClassNumber();
- }
-
- // Assign equivalence class to node.
- SetClass(node, recent->recent_class);
- Trace(" Assigned class number is %d\n", GetClass(node));
- }
+ void VisitMid(Node* node, DFSDirection direction);
// Called at post-visit during DFS walk.
- void VisitPost(Node* node, Node* parent_node, DFSDirection direction) {
- Trace("CEQ: Post-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
- BracketList& blist = GetBracketList(node);
-
- // Remove brackets pointing to this node [line:19].
- BracketListDelete(blist, node, direction);
-
- // Propagate bracket list up the DFS tree [line:13].
- if (parent_node != NULL) {
- BracketList& parent_blist = GetBracketList(parent_node);
- parent_blist.splice(parent_blist.end(), blist);
- }
- }
+ void VisitPost(Node* node, Node* parent_node, DFSDirection direction);
// Called when hitting a back edge in the DFS walk.
- void VisitBackedge(Node* from, Node* to, DFSDirection direction) {
- Trace("CEQ: Backedge from #%d:%s to #%d:%s\n", from->id(),
- from->op()->mnemonic(), to->id(), to->op()->mnemonic());
-
- // Push backedge onto the bracket list [line:25].
- Bracket bracket = {direction, kInvalidClass, 0, from, to};
- GetBracketList(from).push_back(bracket);
- }
+ void VisitBackedge(Node* from, Node* to, DFSDirection direction);
// Performs and undirected DFS walk of the graph. Conceptually all nodes are
// expanded, splitting "input" and "use" out into separate nodes. During the
@@ -171,104 +115,10 @@
//
// This will yield a true spanning tree (without cross or forward edges) and
// also discover proper back edges in both directions.
- void RunUndirectedDFS(Node* exit) {
- ZoneStack<DFSStackEntry> stack(zone_);
- DFSPush(stack, exit, NULL, kInputDirection);
- VisitPre(exit);
+ void RunUndirectedDFS(Node* exit);
- while (!stack.empty()) { // Undirected depth-first backwards traversal.
- DFSStackEntry& entry = stack.top();
- Node* node = entry.node;
-
- if (entry.direction == kInputDirection) {
- if (entry.input != node->input_edges().end()) {
- Edge edge = *entry.input;
- Node* input = edge.to();
- ++(entry.input);
- if (NodeProperties::IsControlEdge(edge) &&
- NodeProperties::IsControl(input)) {
- // Visit next control input.
- if (!GetData(input)->participates) continue;
- if (GetData(input)->visited) continue;
- if (GetData(input)->on_stack) {
- // Found backedge if input is on stack.
- if (input != entry.parent_node) {
- VisitBackedge(node, input, kInputDirection);
- }
- } else {
- // Push input onto stack.
- DFSPush(stack, input, node, kInputDirection);
- VisitPre(input);
- }
- }
- continue;
- }
- if (entry.use != node->use_edges().end()) {
- // Switch direction to uses.
- entry.direction = kUseDirection;
- VisitMid(node, kInputDirection);
- continue;
- }
- }
-
- if (entry.direction == kUseDirection) {
- if (entry.use != node->use_edges().end()) {
- Edge edge = *entry.use;
- Node* use = edge.from();
- ++(entry.use);
- if (NodeProperties::IsControlEdge(edge) &&
- NodeProperties::IsControl(use)) {
- // Visit next control use.
- if (!GetData(use)->participates) continue;
- if (GetData(use)->visited) continue;
- if (GetData(use)->on_stack) {
- // Found backedge if use is on stack.
- if (use != entry.parent_node) {
- VisitBackedge(node, use, kUseDirection);
- }
- } else {
- // Push use onto stack.
- DFSPush(stack, use, node, kUseDirection);
- VisitPre(use);
- }
- }
- continue;
- }
- if (entry.input != node->input_edges().end()) {
- // Switch direction to inputs.
- entry.direction = kInputDirection;
- VisitMid(node, kUseDirection);
- continue;
- }
- }
-
- // Pop node from stack when done with all inputs and uses.
- DCHECK(entry.input == node->input_edges().end());
- DCHECK(entry.use == node->use_edges().end());
- DFSPop(stack, node);
- VisitPost(node, entry.parent_node, entry.direction);
- }
- }
-
- void DetermineParticipationEnqueue(ZoneQueue<Node*>& queue, Node* node) {
- if (!GetData(node)->participates) {
- GetData(node)->participates = true;
- queue.push(node);
- }
- }
-
- void DetermineParticipation(Node* exit) {
- ZoneQueue<Node*> queue(zone_);
- DetermineParticipationEnqueue(queue, exit);
- while (!queue.empty()) { // Breadth-first backwards traversal.
- Node* node = queue.front();
- queue.pop();
- int max = NodeProperties::PastControlIndex(node);
- for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
- DetermineParticipationEnqueue(queue, node->InputAt(i));
- }
- }
- }
+ void DetermineParticipationEnqueue(ZoneQueue<Node*>& queue, Node* node);
+ void DetermineParticipation(Node* exit);
private:
NodeData* GetData(Node* node) { return &node_data_[node->id()]; }
@@ -299,56 +149,16 @@
}
// Mutates the DFS stack by pushing an entry.
- void DFSPush(DFSStack& stack, Node* node, Node* from, DFSDirection dir) {
- DCHECK(GetData(node)->participates);
- DCHECK(!GetData(node)->visited);
- GetData(node)->on_stack = true;
- Node::InputEdges::iterator input = node->input_edges().begin();
- Node::UseEdges::iterator use = node->use_edges().begin();
- stack.push({dir, input, use, from, node});
- }
+ void DFSPush(DFSStack& stack, Node* node, Node* from, DFSDirection dir);
// Mutates the DFS stack by popping an entry.
- void DFSPop(DFSStack& stack, Node* node) {
- DCHECK_EQ(stack.top().node, node);
- GetData(node)->on_stack = false;
- GetData(node)->visited = true;
- stack.pop();
- }
+ void DFSPop(DFSStack& stack, Node* node);
- // TODO(mstarzinger): Optimize this to avoid linear search.
- void BracketListDelete(BracketList& blist, Node* to, DFSDirection direction) {
- for (BracketList::iterator i = blist.begin(); i != blist.end(); /*nop*/) {
- if (i->to == to && i->direction != direction) {
- Trace(" BList erased: {%d->%d}\n", i->from->id(), i->to->id());
- i = blist.erase(i);
- } else {
- ++i;
- }
- }
- }
+ void BracketListDelete(BracketList& blist, Node* to, DFSDirection direction);
+ void BracketListTRACE(BracketList& blist);
- void BracketListTrace(BracketList& blist) {
- if (FLAG_trace_turbo_scheduler) {
- Trace(" BList: ");
- for (Bracket bracket : blist) {
- Trace("{%d->%d} ", bracket.from->id(), bracket.to->id());
- }
- Trace("\n");
- }
- }
-
- void Trace(const char* msg, ...) {
- if (FLAG_trace_turbo_scheduler) {
- va_list arguments;
- va_start(arguments, msg);
- base::OS::VPrint(msg, arguments);
- va_end(arguments);
- }
- }
-
- Zone* zone_;
- Graph* graph_;
+ Zone* const zone_;
+ Graph* const graph_;
int dfs_number_; // Generates new DFS pre-order numbers on demand.
int class_number_; // Generates new equivalence class numbers on demand.
Data node_data_; // Per-node data stored as a side-table.
diff --git a/src/compiler/control-flow-optimizer.cc b/src/compiler/control-flow-optimizer.cc
new file mode 100644
index 0000000..3fc3bce
--- /dev/null
+++ b/src/compiler/control-flow-optimizer.cc
@@ -0,0 +1,278 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/control-flow-optimizer.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+ControlFlowOptimizer::ControlFlowOptimizer(Graph* graph,
+ CommonOperatorBuilder* common,
+ MachineOperatorBuilder* machine,
+ Zone* zone)
+ : graph_(graph),
+ common_(common),
+ machine_(machine),
+ queue_(zone),
+ queued_(graph, 2),
+ zone_(zone) {}
+
+
+void ControlFlowOptimizer::Optimize() {
+ Enqueue(graph()->start());
+ while (!queue_.empty()) {
+ Node* node = queue_.front();
+ queue_.pop();
+ if (node->IsDead()) continue;
+ switch (node->opcode()) {
+ case IrOpcode::kBranch:
+ VisitBranch(node);
+ break;
+ default:
+ VisitNode(node);
+ break;
+ }
+ }
+}
+
+
+void ControlFlowOptimizer::Enqueue(Node* node) {
+ DCHECK_NOT_NULL(node);
+ if (node->IsDead() || queued_.Get(node)) return;
+ queued_.Set(node, true);
+ queue_.push(node);
+}
+
+
+void ControlFlowOptimizer::VisitNode(Node* node) {
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) {
+ Enqueue(edge.from());
+ }
+ }
+}
+
+
+void ControlFlowOptimizer::VisitBranch(Node* node) {
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ if (TryBuildSwitch(node)) return;
+ if (TryCloneBranch(node)) return;
+ VisitNode(node);
+}
+
+
+bool ControlFlowOptimizer::TryCloneBranch(Node* node) {
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+
+ // This optimization is a special case of (super)block cloning. It takes an
+ // input graph as shown below and clones the Branch node for every predecessor
+ // to the Merge, essentially removing the Merge completely. This avoids
+ // materializing the bit for the Phi and may offer potential for further
+ // branch folding optimizations (i.e. because one or more inputs to the Phi is
+ // a constant). Note that there may be more Phi nodes hanging off the Merge,
+ // but we can only a certain subset of them currently (actually only Phi and
+ // EffectPhi nodes whose uses have either the IfTrue or IfFalse as control
+ // input).
+
+ // Control1 ... ControlN
+ // ^ ^
+ // | | Cond1 ... CondN
+ // +----+ +----+ ^ ^
+ // | | | |
+ // | | +----+ |
+ // Merge<--+ | +------------+
+ // ^ \|/
+ // | Phi
+ // | |
+ // Branch----+
+ // ^
+ // |
+ // +-----+-----+
+ // | |
+ // IfTrue IfFalse
+ // ^ ^
+ // | |
+
+ // The resulting graph (modulo the Phi and EffectPhi nodes) looks like this:
+
+ // Control1 Cond1 ... ControlN CondN
+ // ^ ^ ^ ^
+ // \ / \ /
+ // Branch ... Branch
+ // ^ ^
+ // | |
+ // +---+---+ +---+----+
+ // | | | |
+ // IfTrue IfFalse ... IfTrue IfFalse
+ // ^ ^ ^ ^
+ // | | | |
+ // +--+ +-------------+ |
+ // | | +--------------+ +--+
+ // | | | |
+ // Merge Merge
+ // ^ ^
+ // | |
+
+ Node* branch = node;
+ Node* cond = NodeProperties::GetValueInput(branch, 0);
+ if (!cond->OwnedBy(branch) || cond->opcode() != IrOpcode::kPhi) return false;
+ Node* merge = NodeProperties::GetControlInput(branch);
+ if (merge->opcode() != IrOpcode::kMerge ||
+ NodeProperties::GetControlInput(cond) != merge) {
+ return false;
+ }
+ // Grab the IfTrue/IfFalse projections of the Branch.
+ BranchMatcher matcher(branch);
+ // Check/collect other Phi/EffectPhi nodes hanging off the Merge.
+ NodeVector phis(zone());
+ for (Node* const use : merge->uses()) {
+ if (use == branch || use == cond) continue;
+ // We cannot currently deal with non-Phi/EffectPhi nodes hanging off the
+ // Merge. Ideally, we would just clone the nodes (and everything that
+ // depends on it to some distant join point), but that requires knowledge
+ // about dominance/post-dominance.
+ if (!NodeProperties::IsPhi(use)) return false;
+ for (Edge edge : use->use_edges()) {
+ // Right now we can only handle Phi/EffectPhi nodes whose uses are
+ // directly control-dependend on either the IfTrue or the IfFalse
+ // successor, because we know exactly how to update those uses.
+ // TODO(turbofan): Generalize this to all Phi/EffectPhi nodes using
+ // dominance/post-dominance on the sea of nodes.
+ if (edge.from()->op()->ControlInputCount() != 1) return false;
+ Node* control = NodeProperties::GetControlInput(edge.from());
+ if (NodeProperties::IsPhi(edge.from())) {
+ control = NodeProperties::GetControlInput(control, edge.index());
+ }
+ if (control != matcher.IfTrue() && control != matcher.IfFalse())
+ return false;
+ }
+ phis.push_back(use);
+ }
+ BranchHint const hint = BranchHintOf(branch->op());
+ int const input_count = merge->op()->ControlInputCount();
+ DCHECK_LE(1, input_count);
+ Node** const inputs = zone()->NewArray<Node*>(2 * input_count);
+ Node** const merge_true_inputs = &inputs[0];
+ Node** const merge_false_inputs = &inputs[input_count];
+ for (int index = 0; index < input_count; ++index) {
+ Node* cond1 = NodeProperties::GetValueInput(cond, index);
+ Node* control1 = NodeProperties::GetControlInput(merge, index);
+ Node* branch1 = graph()->NewNode(common()->Branch(hint), cond1, control1);
+ merge_true_inputs[index] = graph()->NewNode(common()->IfTrue(), branch1);
+ merge_false_inputs[index] = graph()->NewNode(common()->IfFalse(), branch1);
+ Enqueue(branch1);
+ }
+ Node* const merge_true = graph()->NewNode(common()->Merge(input_count),
+ input_count, merge_true_inputs);
+ Node* const merge_false = graph()->NewNode(common()->Merge(input_count),
+ input_count, merge_false_inputs);
+ for (Node* const phi : phis) {
+ for (int index = 0; index < input_count; ++index) {
+ inputs[index] = phi->InputAt(index);
+ }
+ inputs[input_count] = merge_true;
+ Node* phi_true = graph()->NewNode(phi->op(), input_count + 1, inputs);
+ inputs[input_count] = merge_false;
+ Node* phi_false = graph()->NewNode(phi->op(), input_count + 1, inputs);
+ for (Edge edge : phi->use_edges()) {
+ Node* control = NodeProperties::GetControlInput(edge.from());
+ if (NodeProperties::IsPhi(edge.from())) {
+ control = NodeProperties::GetControlInput(control, edge.index());
+ }
+ DCHECK(control == matcher.IfTrue() || control == matcher.IfFalse());
+ edge.UpdateTo((control == matcher.IfTrue()) ? phi_true : phi_false);
+ }
+ phi->Kill();
+ }
+ // Fix up IfTrue and IfFalse and kill all dead nodes.
+ matcher.IfFalse()->ReplaceUses(merge_false);
+ matcher.IfTrue()->ReplaceUses(merge_true);
+ matcher.IfFalse()->Kill();
+ matcher.IfTrue()->Kill();
+ branch->Kill();
+ cond->Kill();
+ merge->Kill();
+ return true;
+}
+
+
+bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+
+ Node* branch = node;
+ if (BranchHintOf(branch->op()) != BranchHint::kNone) return false;
+ Node* cond = NodeProperties::GetValueInput(branch, 0);
+ if (cond->opcode() != IrOpcode::kWord32Equal) return false;
+ Int32BinopMatcher m(cond);
+ Node* index = m.left().node();
+ if (!m.right().HasValue()) return false;
+ int32_t value = m.right().Value();
+ ZoneSet<int32_t> values(zone());
+ values.insert(value);
+
+ Node* if_false;
+ Node* if_true;
+ while (true) {
+ BranchMatcher matcher(branch);
+ DCHECK(matcher.Matched());
+
+ if_true = matcher.IfTrue();
+ if_false = matcher.IfFalse();
+
+ auto it = if_false->uses().begin();
+ if (it == if_false->uses().end()) break;
+ Node* branch1 = *it++;
+ if (branch1->opcode() != IrOpcode::kBranch) break;
+ if (BranchHintOf(branch1->op()) != BranchHint::kNone) break;
+ if (it != if_false->uses().end()) break;
+ Node* cond1 = branch1->InputAt(0);
+ if (cond1->opcode() != IrOpcode::kWord32Equal) break;
+ Int32BinopMatcher m1(cond1);
+ if (m1.left().node() != index) break;
+ if (!m1.right().HasValue()) break;
+ int32_t value1 = m1.right().Value();
+ if (values.find(value1) != values.end()) break;
+ DCHECK_NE(value, value1);
+
+ if (branch != node) {
+ branch->NullAllInputs();
+ if_true->ReplaceInput(0, node);
+ }
+ NodeProperties::ChangeOp(if_true, common()->IfValue(value));
+ if_false->NullAllInputs();
+ Enqueue(if_true);
+
+ branch = branch1;
+ value = value1;
+ values.insert(value);
+ }
+
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
+ if (branch == node) {
+ DCHECK_EQ(1u, values.size());
+ return false;
+ }
+ DCHECK_LT(1u, values.size());
+ node->ReplaceInput(0, index);
+ NodeProperties::ChangeOp(node, common()->Switch(values.size() + 1));
+ if_true->ReplaceInput(0, node);
+ NodeProperties::ChangeOp(if_true, common()->IfValue(value));
+ Enqueue(if_true);
+ if_false->ReplaceInput(0, node);
+ NodeProperties::ChangeOp(if_false, common()->IfDefault());
+ Enqueue(if_false);
+ branch->NullAllInputs();
+ return true;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/control-flow-optimizer.h b/src/compiler/control-flow-optimizer.h
new file mode 100644
index 0000000..f72fa58
--- /dev/null
+++ b/src/compiler/control-flow-optimizer.h
@@ -0,0 +1,56 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
+#define V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
+
+#include "src/compiler/node-marker.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class Graph;
+class MachineOperatorBuilder;
+class Node;
+
+
+class ControlFlowOptimizer final {
+ public:
+ ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common,
+ MachineOperatorBuilder* machine, Zone* zone);
+
+ void Optimize();
+
+ private:
+ void Enqueue(Node* node);
+ void VisitNode(Node* node);
+ void VisitBranch(Node* node);
+
+ bool TryBuildSwitch(Node* node);
+ bool TryCloneBranch(Node* node);
+
+ Graph* graph() const { return graph_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ MachineOperatorBuilder* machine() const { return machine_; }
+ Zone* zone() const { return zone_; }
+
+ Graph* const graph_;
+ CommonOperatorBuilder* const common_;
+ MachineOperatorBuilder* const machine_;
+ ZoneQueue<Node*> queue_;
+ NodeMarker<bool> queued_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(ControlFlowOptimizer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
diff --git a/src/compiler/control-reducer.cc b/src/compiler/control-reducer.cc
deleted file mode 100644
index eef8a49..0000000
--- a/src/compiler/control-reducer.cc
+++ /dev/null
@@ -1,592 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/control-reducer.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-enum VisitState { kUnvisited = 0, kOnStack = 1, kRevisit = 2, kVisited = 3 };
-enum Decision { kFalse, kUnknown, kTrue };
-
-class ReachabilityMarker : public NodeMarker<uint8_t> {
- public:
- explicit ReachabilityMarker(Graph* graph) : NodeMarker<uint8_t>(graph, 8) {}
- bool SetReachableFromEnd(Node* node) {
- uint8_t before = Get(node);
- Set(node, before | kFromEnd);
- return before & kFromEnd;
- }
- bool IsReachableFromEnd(Node* node) { return Get(node) & kFromEnd; }
- bool SetReachableFromStart(Node* node) {
- uint8_t before = Get(node);
- Set(node, before | kFromStart);
- return before & kFromStart;
- }
- bool IsReachableFromStart(Node* node) { return Get(node) & kFromStart; }
- void Push(Node* node) { Set(node, Get(node) | kFwStack); }
- void Pop(Node* node) { Set(node, Get(node) & ~kFwStack); }
- bool IsOnStack(Node* node) { return Get(node) & kFwStack; }
-
- private:
- enum Bit { kFromEnd = 1, kFromStart = 2, kFwStack = 4 };
-};
-
-
-#define TRACE(x) \
- if (FLAG_trace_turbo_reduction) PrintF x
-
-class ControlReducerImpl {
- public:
- ControlReducerImpl(Zone* zone, JSGraph* jsgraph,
- CommonOperatorBuilder* common)
- : zone_(zone),
- jsgraph_(jsgraph),
- common_(common),
- state_(jsgraph->graph()->NodeCount(), kUnvisited, zone_),
- stack_(zone_),
- revisit_(zone_),
- dead_(NULL) {}
-
- Zone* zone_;
- JSGraph* jsgraph_;
- CommonOperatorBuilder* common_;
- ZoneVector<VisitState> state_;
- ZoneDeque<Node*> stack_;
- ZoneDeque<Node*> revisit_;
- Node* dead_;
-
- void Reduce() {
- Push(graph()->end());
- do {
- // Process the node on the top of the stack, potentially pushing more
- // or popping the node off the stack.
- ReduceTop();
- // If the stack becomes empty, revisit any nodes in the revisit queue.
- // If no nodes in the revisit queue, try removing dead loops.
- // If no dead loops, then finish.
- } while (!stack_.empty() || TryRevisit() || RepairAndRemoveLoops());
- }
-
- bool TryRevisit() {
- while (!revisit_.empty()) {
- Node* n = revisit_.back();
- revisit_.pop_back();
- if (state_[n->id()] == kRevisit) { // state can change while in queue.
- Push(n);
- return true;
- }
- }
- return false;
- }
-
- // Repair the graph after the possible creation of non-terminating or dead
- // loops. Removing dead loops can produce more opportunities for reduction.
- bool RepairAndRemoveLoops() {
- // TODO(turbofan): we can skip this if the graph has no loops, but
- // we have to be careful about proper loop detection during reduction.
-
- // Gather all nodes backwards-reachable from end (through inputs).
- ReachabilityMarker marked(graph());
- NodeVector nodes(zone_);
- AddNodesReachableFromEnd(marked, nodes);
-
- // Walk forward through control nodes, looking for back edges to nodes
- // that are not connected to end. Those are non-terminating loops (NTLs).
- Node* start = graph()->start();
- marked.Push(start);
- marked.SetReachableFromStart(start);
-
- // We use a stack of (Node, UseIter) pairs to avoid O(n^2) traversal.
- typedef std::pair<Node*, UseIter> FwIter;
- ZoneVector<FwIter> fw_stack(zone_);
- fw_stack.push_back(FwIter(start, start->uses().begin()));
-
- while (!fw_stack.empty()) {
- Node* node = fw_stack.back().first;
- TRACE(("ControlFw: #%d:%s\n", node->id(), node->op()->mnemonic()));
- bool pop = true;
- while (fw_stack.back().second != node->uses().end()) {
- Node* succ = *(fw_stack.back().second);
- if (marked.IsOnStack(succ) && !marked.IsReachableFromEnd(succ)) {
- // {succ} is on stack and not reachable from end.
- Node* added = ConnectNTL(succ);
- nodes.push_back(added);
- marked.SetReachableFromEnd(added);
- AddBackwardsReachableNodes(marked, nodes, nodes.size() - 1);
-
- // Reset the use iterators for the entire stack.
- for (size_t i = 0; i < fw_stack.size(); i++) {
- FwIter& iter = fw_stack[i];
- fw_stack[i] = FwIter(iter.first, iter.first->uses().begin());
- }
- pop = false; // restart traversing successors of this node.
- break;
- }
- if (IrOpcode::IsControlOpcode(succ->opcode()) &&
- !marked.IsReachableFromStart(succ)) {
- // {succ} is a control node and not yet reached from start.
- marked.Push(succ);
- marked.SetReachableFromStart(succ);
- fw_stack.push_back(FwIter(succ, succ->uses().begin()));
- pop = false; // "recurse" into successor control node.
- break;
- }
- ++fw_stack.back().second;
- }
- if (pop) {
- marked.Pop(node);
- fw_stack.pop_back();
- }
- }
-
- // Trim references from dead nodes to live nodes first.
- jsgraph_->GetCachedNodes(&nodes);
- TrimNodes(marked, nodes);
-
- // Any control nodes not reachable from start are dead, even loops.
- for (size_t i = 0; i < nodes.size(); i++) {
- Node* node = nodes[i];
- if (IrOpcode::IsControlOpcode(node->opcode()) &&
- !marked.IsReachableFromStart(node)) {
- ReplaceNode(node, dead()); // uses will be added to revisit queue.
- }
- }
- return TryRevisit(); // try to push a node onto the stack.
- }
-
- // Connect {loop}, the header of a non-terminating loop, to the end node.
- Node* ConnectNTL(Node* loop) {
- TRACE(("ConnectNTL: #%d:%s\n", loop->id(), loop->op()->mnemonic()));
-
- if (loop->opcode() != IrOpcode::kTerminate) {
- // Insert a {Terminate} node if the loop has effects.
- ZoneDeque<Node*> effects(zone_);
- for (Node* const use : loop->uses()) {
- if (use->opcode() == IrOpcode::kEffectPhi) effects.push_back(use);
- }
- int count = static_cast<int>(effects.size());
- if (count > 0) {
- Node** inputs = zone_->NewArray<Node*>(1 + count);
- for (int i = 0; i < count; i++) inputs[i] = effects[i];
- inputs[count] = loop;
- loop = graph()->NewNode(common_->Terminate(count), 1 + count, inputs);
- TRACE(("AddTerminate: #%d:%s[%d]\n", loop->id(), loop->op()->mnemonic(),
- count));
- }
- }
-
- Node* to_add = loop;
- Node* end = graph()->end();
- CHECK_EQ(IrOpcode::kEnd, end->opcode());
- Node* merge = end->InputAt(0);
- if (merge == NULL || merge->opcode() == IrOpcode::kDead) {
- // The end node died; just connect end to {loop}.
- end->ReplaceInput(0, loop);
- } else if (merge->opcode() != IrOpcode::kMerge) {
- // Introduce a final merge node for {end->InputAt(0)} and {loop}.
- merge = graph()->NewNode(common_->Merge(2), merge, loop);
- end->ReplaceInput(0, merge);
- to_add = merge;
- // Mark the node as visited so that we can revisit later.
- EnsureStateSize(merge->id());
- state_[merge->id()] = kVisited;
- } else {
- // Append a new input to the final merge at the end.
- merge->AppendInput(graph()->zone(), loop);
- merge->set_op(common_->Merge(merge->InputCount()));
- }
- return to_add;
- }
-
- void AddNodesReachableFromEnd(ReachabilityMarker& marked, NodeVector& nodes) {
- Node* end = graph()->end();
- marked.SetReachableFromEnd(end);
- if (!end->IsDead()) {
- nodes.push_back(end);
- AddBackwardsReachableNodes(marked, nodes, nodes.size() - 1);
- }
- }
-
- void AddBackwardsReachableNodes(ReachabilityMarker& marked, NodeVector& nodes,
- size_t cursor) {
- while (cursor < nodes.size()) {
- Node* node = nodes[cursor++];
- for (Node* const input : node->inputs()) {
- if (!marked.SetReachableFromEnd(input)) {
- nodes.push_back(input);
- }
- }
- }
- }
-
- void Trim() {
- // Gather all nodes backwards-reachable from end through inputs.
- ReachabilityMarker marked(graph());
- NodeVector nodes(zone_);
- AddNodesReachableFromEnd(marked, nodes);
-
- // Process cached nodes in the JSGraph too.
- jsgraph_->GetCachedNodes(&nodes);
- TrimNodes(marked, nodes);
- }
-
- void TrimNodes(ReachabilityMarker& marked, NodeVector& nodes) {
- // Remove dead->live edges.
- for (size_t j = 0; j < nodes.size(); j++) {
- Node* node = nodes[j];
- for (Edge edge : node->use_edges()) {
- Node* use = edge.from();
- if (!marked.IsReachableFromEnd(use)) {
- TRACE(("DeadLink: #%d:%s(%d) -> #%d:%s\n", use->id(),
- use->op()->mnemonic(), edge.index(), node->id(),
- node->op()->mnemonic()));
- edge.UpdateTo(NULL);
- }
- }
- }
-#if DEBUG
- // Verify that no inputs to live nodes are NULL.
- for (size_t j = 0; j < nodes.size(); j++) {
- Node* node = nodes[j];
- for (Node* const input : node->inputs()) {
- CHECK_NE(NULL, input);
- }
- for (Node* const use : node->uses()) {
- CHECK(marked.IsReachableFromEnd(use));
- }
- }
-#endif
- }
-
- // Reduce the node on the top of the stack.
- // If an input {i} is not yet visited or needs to be revisited, push {i} onto
- // the stack and return. Otherwise, all inputs are visited, so apply
- // reductions for {node} and pop it off the stack.
- void ReduceTop() {
- size_t height = stack_.size();
- Node* node = stack_.back();
-
- if (node->IsDead()) return Pop(); // Node was killed while on stack.
-
- TRACE(("ControlReduce: #%d:%s\n", node->id(), node->op()->mnemonic()));
-
- // Recurse on an input if necessary.
- for (Node* const input : node->inputs()) {
- if (Recurse(input)) return;
- }
-
- // All inputs should be visited or on stack. Apply reductions to node.
- Node* replacement = ReduceNode(node);
- if (replacement != node) ReplaceNode(node, replacement);
-
- // After reducing the node, pop it off the stack.
- CHECK_EQ(static_cast<int>(height), static_cast<int>(stack_.size()));
- Pop();
-
- // If there was a replacement, reduce it after popping {node}.
- if (replacement != node) Recurse(replacement);
- }
-
- void EnsureStateSize(size_t id) {
- if (id >= state_.size()) {
- state_.resize((3 * id) / 2, kUnvisited);
- }
- }
-
- // Push a node onto the stack if its state is {kUnvisited} or {kRevisit}.
- bool Recurse(Node* node) {
- size_t id = static_cast<size_t>(node->id());
- EnsureStateSize(id);
- if (state_[id] != kRevisit && state_[id] != kUnvisited) return false;
- Push(node);
- return true;
- }
-
- void Push(Node* node) {
- state_[node->id()] = kOnStack;
- stack_.push_back(node);
- }
-
- void Pop() {
- int pos = static_cast<int>(stack_.size()) - 1;
- DCHECK_GE(pos, 0);
- DCHECK_EQ(kOnStack, state_[stack_[pos]->id()]);
- state_[stack_[pos]->id()] = kVisited;
- stack_.pop_back();
- }
-
- // Queue a node to be revisited if it has been visited once already.
- void Revisit(Node* node) {
- size_t id = static_cast<size_t>(node->id());
- if (id < state_.size() && state_[id] == kVisited) {
- TRACE((" Revisit #%d:%s\n", node->id(), node->op()->mnemonic()));
- state_[id] = kRevisit;
- revisit_.push_back(node);
- }
- }
-
- Node* dead() {
- if (dead_ == NULL) dead_ = graph()->NewNode(common_->Dead());
- return dead_;
- }
-
- //===========================================================================
- // Reducer implementation: perform reductions on a node.
- //===========================================================================
- Node* ReduceNode(Node* node) {
- if (node->op()->ControlInputCount() == 1) {
- // If a node has only one control input and it is dead, replace with dead.
- Node* control = NodeProperties::GetControlInput(node);
- if (control->opcode() == IrOpcode::kDead) {
- TRACE(("ControlDead: #%d:%s\n", node->id(), node->op()->mnemonic()));
- return control;
- }
- }
-
- // Reduce branches, phis, and merges.
- switch (node->opcode()) {
- case IrOpcode::kBranch:
- return ReduceBranch(node);
- case IrOpcode::kLoop:
- case IrOpcode::kMerge:
- return ReduceMerge(node);
- case IrOpcode::kSelect:
- return ReduceSelect(node);
- case IrOpcode::kPhi:
- case IrOpcode::kEffectPhi:
- return ReducePhi(node);
- default:
- return node;
- }
- }
-
- // Try to statically fold a condition.
- Decision DecideCondition(Node* cond) {
- switch (cond->opcode()) {
- case IrOpcode::kInt32Constant:
- return Int32Matcher(cond).Is(0) ? kFalse : kTrue;
- case IrOpcode::kInt64Constant:
- return Int64Matcher(cond).Is(0) ? kFalse : kTrue;
- case IrOpcode::kNumberConstant:
- return NumberMatcher(cond).Is(0) ? kFalse : kTrue;
- case IrOpcode::kHeapConstant: {
- Handle<Object> object =
- HeapObjectMatcher<Object>(cond).Value().handle();
- if (object->IsTrue()) return kTrue;
- if (object->IsFalse()) return kFalse;
- // TODO(turbofan): decide more conditions for heap constants.
- break;
- }
- default:
- break;
- }
- return kUnknown;
- }
-
- // Reduce redundant selects.
- Node* ReduceSelect(Node* const node) {
- Node* const tvalue = node->InputAt(1);
- Node* const fvalue = node->InputAt(2);
- if (tvalue == fvalue) return tvalue;
- Decision result = DecideCondition(node->InputAt(0));
- if (result == kTrue) return tvalue;
- if (result == kFalse) return fvalue;
- return node;
- }
-
- // Reduce redundant phis.
- Node* ReducePhi(Node* node) {
- int n = node->InputCount();
- if (n <= 1) return dead(); // No non-control inputs.
- if (n == 2) return node->InputAt(0); // Only one non-control input.
-
- // Never remove an effect phi from a (potentially non-terminating) loop.
- // Otherwise, we might end up eliminating effect nodes, such as calls,
- // before the loop.
- if (node->opcode() == IrOpcode::kEffectPhi &&
- NodeProperties::GetControlInput(node)->opcode() == IrOpcode::kLoop) {
- return node;
- }
-
- Node* replacement = NULL;
- Node::Inputs inputs = node->inputs();
- for (InputIter it = inputs.begin(); n > 1; --n, ++it) {
- Node* input = *it;
- if (input->opcode() == IrOpcode::kDead) continue; // ignore dead inputs.
- if (input != node && input != replacement) { // non-redundant input.
- if (replacement != NULL) return node;
- replacement = input;
- }
- }
- return replacement == NULL ? dead() : replacement;
- }
-
- // Reduce merges by trimming away dead inputs from the merge and phis.
- Node* ReduceMerge(Node* node) {
- // Count the number of live inputs.
- int live = 0;
- int index = 0;
- int live_index = 0;
- for (Node* const input : node->inputs()) {
- if (input->opcode() != IrOpcode::kDead) {
- live++;
- live_index = index;
- }
- index++;
- }
-
- if (live > 1 && live == node->InputCount()) return node; // nothing to do.
-
- TRACE(("ReduceMerge: #%d:%s (%d live)\n", node->id(),
- node->op()->mnemonic(), live));
-
- if (live == 0) return dead(); // no remaining inputs.
-
- // Gather phis and effect phis to be edited.
- ZoneVector<Node*> phis(zone_);
- for (Node* const use : node->uses()) {
- if (use->opcode() == IrOpcode::kPhi ||
- use->opcode() == IrOpcode::kEffectPhi) {
- phis.push_back(use);
- }
- }
-
- if (live == 1) {
- // All phis are redundant. Replace them with their live input.
- for (Node* const phi : phis) ReplaceNode(phi, phi->InputAt(live_index));
- // The merge itself is redundant.
- return node->InputAt(live_index);
- }
-
- // Edit phis in place, removing dead inputs and revisiting them.
- for (Node* const phi : phis) {
- TRACE((" PhiInMerge: #%d:%s (%d live)\n", phi->id(),
- phi->op()->mnemonic(), live));
- RemoveDeadInputs(node, phi);
- Revisit(phi);
- }
- // Edit the merge in place, removing dead inputs.
- RemoveDeadInputs(node, node);
- return node;
- }
-
- // Reduce branches if they have constant inputs.
- Node* ReduceBranch(Node* node) {
- Decision result = DecideCondition(node->InputAt(0));
- if (result == kUnknown) return node;
-
- TRACE(("BranchReduce: #%d:%s = %s\n", node->id(), node->op()->mnemonic(),
- (result == kTrue) ? "true" : "false"));
-
- // Replace IfTrue and IfFalse projections from this branch.
- Node* control = NodeProperties::GetControlInput(node);
- for (Edge edge : node->use_edges()) {
- Node* use = edge.from();
- if (use->opcode() == IrOpcode::kIfTrue) {
- TRACE((" IfTrue: #%d:%s\n", use->id(), use->op()->mnemonic()));
- edge.UpdateTo(NULL);
- ReplaceNode(use, (result == kTrue) ? control : dead());
- } else if (use->opcode() == IrOpcode::kIfFalse) {
- TRACE((" IfFalse: #%d:%s\n", use->id(), use->op()->mnemonic()));
- edge.UpdateTo(NULL);
- ReplaceNode(use, (result == kTrue) ? dead() : control);
- }
- }
- return control;
- }
-
- // Remove inputs to {node} corresponding to the dead inputs to {merge}
- // and compact the remaining inputs, updating the operator.
- void RemoveDeadInputs(Node* merge, Node* node) {
- int pos = 0;
- for (int i = 0; i < node->InputCount(); i++) {
- // skip dead inputs.
- if (i < merge->InputCount() &&
- merge->InputAt(i)->opcode() == IrOpcode::kDead)
- continue;
- // compact live inputs.
- if (pos != i) node->ReplaceInput(pos, node->InputAt(i));
- pos++;
- }
- node->TrimInputCount(pos);
- if (node->opcode() == IrOpcode::kPhi) {
- node->set_op(common_->Phi(OpParameter<MachineType>(node->op()), pos - 1));
- } else if (node->opcode() == IrOpcode::kEffectPhi) {
- node->set_op(common_->EffectPhi(pos - 1));
- } else if (node->opcode() == IrOpcode::kMerge) {
- node->set_op(common_->Merge(pos));
- } else if (node->opcode() == IrOpcode::kLoop) {
- node->set_op(common_->Loop(pos));
- } else {
- UNREACHABLE();
- }
- }
-
- // Replace uses of {node} with {replacement} and revisit the uses.
- void ReplaceNode(Node* node, Node* replacement) {
- if (node == replacement) return;
- TRACE((" Replace: #%d:%s with #%d:%s\n", node->id(),
- node->op()->mnemonic(), replacement->id(),
- replacement->op()->mnemonic()));
- for (Node* const use : node->uses()) {
- // Don't revisit this node if it refers to itself.
- if (use != node) Revisit(use);
- }
- node->ReplaceUses(replacement);
- node->Kill();
- }
-
- Graph* graph() { return jsgraph_->graph(); }
-};
-
-
-void ControlReducer::ReduceGraph(Zone* zone, JSGraph* jsgraph,
- CommonOperatorBuilder* common) {
- ControlReducerImpl impl(zone, jsgraph, common);
- impl.Reduce();
-}
-
-
-void ControlReducer::TrimGraph(Zone* zone, JSGraph* jsgraph) {
- ControlReducerImpl impl(zone, jsgraph, NULL);
- impl.Trim();
-}
-
-
-Node* ControlReducer::ReducePhiForTesting(JSGraph* jsgraph,
- CommonOperatorBuilder* common,
- Node* node) {
- Zone zone(jsgraph->graph()->zone()->isolate());
- ControlReducerImpl impl(&zone, jsgraph, common);
- return impl.ReducePhi(node);
-}
-
-
-Node* ControlReducer::ReduceMergeForTesting(JSGraph* jsgraph,
- CommonOperatorBuilder* common,
- Node* node) {
- Zone zone(jsgraph->graph()->zone()->isolate());
- ControlReducerImpl impl(&zone, jsgraph, common);
- return impl.ReduceMerge(node);
-}
-
-
-Node* ControlReducer::ReduceBranchForTesting(JSGraph* jsgraph,
- CommonOperatorBuilder* common,
- Node* node) {
- Zone zone(jsgraph->graph()->zone()->isolate());
- ControlReducerImpl impl(&zone, jsgraph, common);
- return impl.ReduceBranch(node);
-}
-}
-}
-} // namespace v8::internal::compiler
diff --git a/src/compiler/control-reducer.h b/src/compiler/control-reducer.h
deleted file mode 100644
index e25bb88..0000000
--- a/src/compiler/control-reducer.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_CONTROL_REDUCER_H_
-#define V8_COMPILER_CONTROL_REDUCER_H_
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class JSGraph;
-class CommonOperatorBuilder;
-class Node;
-
-class ControlReducer {
- public:
- // Perform branch folding and dead code elimination on the graph.
- static void ReduceGraph(Zone* zone, JSGraph* graph,
- CommonOperatorBuilder* builder);
-
- // Trim nodes in the graph that are not reachable from end.
- static void TrimGraph(Zone* zone, JSGraph* graph);
-
- // Testing interface.
- static Node* ReducePhiForTesting(JSGraph* graph,
- CommonOperatorBuilder* builder, Node* node);
- static Node* ReduceBranchForTesting(JSGraph* graph,
- CommonOperatorBuilder* builder,
- Node* node);
- static Node* ReduceMergeForTesting(JSGraph* graph,
- CommonOperatorBuilder* builder,
- Node* node);
-};
-}
-}
-} // namespace v8::internal::compiler
-
-#endif // V8_COMPILER_CONTROL_REDUCER_H_
diff --git a/src/compiler/dead-code-elimination.cc b/src/compiler/dead-code-elimination.cc
new file mode 100644
index 0000000..697d7f8
--- /dev/null
+++ b/src/compiler/dead-code-elimination.cc
@@ -0,0 +1,145 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/dead-code-elimination.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+DeadCodeElimination::DeadCodeElimination(Editor* editor, Graph* graph,
+ CommonOperatorBuilder* common)
+ : AdvancedReducer(editor),
+ graph_(graph),
+ common_(common),
+ dead_(graph->NewNode(common->Dead())) {}
+
+
+Reduction DeadCodeElimination::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kEnd:
+ return ReduceEnd(node);
+ case IrOpcode::kLoop:
+ case IrOpcode::kMerge:
+ return ReduceLoopOrMerge(node);
+ default:
+ return ReduceNode(node);
+ }
+ UNREACHABLE();
+ return NoChange();
+}
+
+
+Reduction DeadCodeElimination::ReduceEnd(Node* node) {
+ DCHECK_EQ(IrOpcode::kEnd, node->opcode());
+ int const input_count = node->InputCount();
+ DCHECK_LE(1, input_count);
+ int live_input_count = 0;
+ for (int i = 0; i < input_count; ++i) {
+ Node* const input = node->InputAt(i);
+ // Skip dead inputs.
+ if (input->opcode() == IrOpcode::kDead) continue;
+ // Compact live inputs.
+ if (i != live_input_count) node->ReplaceInput(live_input_count, input);
+ ++live_input_count;
+ }
+ if (live_input_count == 0) {
+ return Replace(dead());
+ } else if (live_input_count < input_count) {
+ node->TrimInputCount(live_input_count);
+ NodeProperties::ChangeOp(node, common()->End(live_input_count));
+ return Changed(node);
+ }
+ DCHECK_EQ(input_count, live_input_count);
+ return NoChange();
+}
+
+
+Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
+ DCHECK(IrOpcode::IsMergeOpcode(node->opcode()));
+ int const input_count = node->InputCount();
+ DCHECK_LE(1, input_count);
+ // Count the number of live inputs to {node} and compact them on the fly, also
+ // compacting the inputs of the associated {Phi} and {EffectPhi} uses at the
+ // same time. We consider {Loop}s dead even if only the first control input
+ // is dead.
+ int live_input_count = 0;
+ if (node->opcode() != IrOpcode::kLoop ||
+ node->InputAt(0)->opcode() != IrOpcode::kDead) {
+ for (int i = 0; i < input_count; ++i) {
+ Node* const input = node->InputAt(i);
+ // Skip dead inputs.
+ if (input->opcode() == IrOpcode::kDead) continue;
+ // Compact live inputs.
+ if (live_input_count != i) {
+ node->ReplaceInput(live_input_count, input);
+ for (Node* const use : node->uses()) {
+ if (NodeProperties::IsPhi(use)) {
+ DCHECK_EQ(input_count + 1, use->InputCount());
+ use->ReplaceInput(live_input_count, use->InputAt(i));
+ }
+ }
+ }
+ ++live_input_count;
+ }
+ }
+ if (live_input_count == 0) {
+ return Replace(dead());
+ } else if (live_input_count == 1) {
+ // Due to compaction above, the live input is at offset 0.
+ for (Node* const use : node->uses()) {
+ if (NodeProperties::IsPhi(use)) {
+ Replace(use, use->InputAt(0));
+ } else if (use->opcode() == IrOpcode::kTerminate) {
+ DCHECK_EQ(IrOpcode::kLoop, node->opcode());
+ Replace(use, dead());
+ }
+ }
+ return Replace(node->InputAt(0));
+ }
+ DCHECK_LE(2, live_input_count);
+ DCHECK_LE(live_input_count, input_count);
+ // Trim input count for the {Merge} or {Loop} node.
+ if (live_input_count < input_count) {
+ // Trim input counts for all phi uses and revisit them.
+ for (Node* const use : node->uses()) {
+ if (NodeProperties::IsPhi(use)) {
+ use->ReplaceInput(live_input_count, node);
+ TrimMergeOrPhi(use, live_input_count);
+ Revisit(use);
+ }
+ }
+ TrimMergeOrPhi(node, live_input_count);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+Reduction DeadCodeElimination::ReduceNode(Node* node) {
+ // If {node} has exactly one control input and this is {Dead},
+ // replace {node} with {Dead}.
+ int const control_input_count = node->op()->ControlInputCount();
+ if (control_input_count == 0) return NoChange();
+ DCHECK_EQ(1, control_input_count);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (control->opcode() == IrOpcode::kDead) return Replace(control);
+ return NoChange();
+}
+
+
+void DeadCodeElimination::TrimMergeOrPhi(Node* node, int size) {
+ const Operator* const op = common()->ResizeMergeOrPhi(node->op(), size);
+ node->TrimInputCount(OperatorProperties::GetTotalInputCount(op));
+ NodeProperties::ChangeOp(node, op);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/dead-code-elimination.h b/src/compiler/dead-code-elimination.h
new file mode 100644
index 0000000..e5996c8
--- /dev/null
+++ b/src/compiler/dead-code-elimination.h
@@ -0,0 +1,52 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_DEAD_CODE_ELIMINATION_H_
+#define V8_COMPILER_DEAD_CODE_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+
+
+// Propagates {Dead} control through the graph and thereby removes dead code.
+// Note that this does not include trimming dead uses from the graph, and it
+// also does not include detecting dead code by any other means than seeing a
+// {Dead} control input; that is left to other reducers.
+class DeadCodeElimination final : public AdvancedReducer {
+ public:
+ DeadCodeElimination(Editor* editor, Graph* graph,
+ CommonOperatorBuilder* common);
+ ~DeadCodeElimination() final {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceEnd(Node* node);
+ Reduction ReduceLoopOrMerge(Node* node);
+ Reduction ReduceNode(Node* node);
+
+ void TrimMergeOrPhi(Node* node, int size);
+
+ Graph* graph() const { return graph_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ Node* dead() const { return dead_; }
+
+ Graph* const graph_;
+ CommonOperatorBuilder* const common_;
+ Node* const dead_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeadCodeElimination);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_DEAD_CODE_ELIMINATION_H_
diff --git a/src/compiler/diamond.h b/src/compiler/diamond.h
index 6133cc5..e133305 100644
--- a/src/compiler/diamond.h
+++ b/src/compiler/diamond.h
@@ -5,10 +5,8 @@
#ifndef V8_COMPILER_DIAMOND_H_
#define V8_COMPILER_DIAMOND_H_
-#include "src/v8.h"
-
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph.h"
#include "src/compiler/node.h"
namespace v8 {
@@ -51,35 +49,13 @@
}
}
- Node* Phi(MachineType machine_type, Node* tv, Node* fv) {
- return graph->NewNode(common->Phi(machine_type, 2), tv, fv, merge);
- }
-
- Node* EffectPhi(Node* tv, Node* fv) {
- return graph->NewNode(common->EffectPhi(2), tv, fv, merge);
- }
-
- void OverwriteWithPhi(Node* node, MachineType machine_type, Node* tv,
- Node* fv) {
- DCHECK(node->InputCount() >= 3);
- node->set_op(common->Phi(machine_type, 2));
- node->ReplaceInput(0, tv);
- node->ReplaceInput(1, fv);
- node->ReplaceInput(2, merge);
- node->TrimInputCount(3);
- }
-
- void OverwriteWithEffectPhi(Node* node, Node* te, Node* fe) {
- DCHECK(node->InputCount() >= 3);
- node->set_op(common->EffectPhi(2));
- node->ReplaceInput(0, te);
- node->ReplaceInput(1, fe);
- node->ReplaceInput(2, merge);
- node->TrimInputCount(3);
+ Node* Phi(MachineRepresentation rep, Node* tv, Node* fv) {
+ return graph->NewNode(common->Phi(rep, 2), tv, fv, merge);
}
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_DIAMOND_H_
diff --git a/src/compiler/escape-analysis-reducer.cc b/src/compiler/escape-analysis-reducer.cc
new file mode 100644
index 0000000..df8b65d
--- /dev/null
+++ b/src/compiler/escape-analysis-reducer.cc
@@ -0,0 +1,313 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/escape-analysis-reducer.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/counters.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+EscapeAnalysisReducer::EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
+ EscapeAnalysis* escape_analysis,
+ Zone* zone)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ escape_analysis_(escape_analysis),
+ zone_(zone),
+ visited_(static_cast<int>(jsgraph->graph()->NodeCount()), zone) {}
+
+
+Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kLoadField:
+ case IrOpcode::kLoadElement:
+ return ReduceLoad(node);
+ case IrOpcode::kStoreField:
+ case IrOpcode::kStoreElement:
+ return ReduceStore(node);
+ case IrOpcode::kAllocate:
+ return ReduceAllocate(node);
+ case IrOpcode::kFinishRegion:
+ return ReduceFinishRegion(node);
+ case IrOpcode::kReferenceEqual:
+ return ReduceReferenceEqual(node);
+ case IrOpcode::kObjectIsSmi:
+ return ReduceObjectIsSmi(node);
+ default:
+ // TODO(sigurds): Change this to GetFrameStateInputCount once
+ // it is working. For now we use EffectInputCount > 0 to determine
+ // whether a node might have a frame state input.
+ if (node->op()->EffectInputCount() > 0) {
+ return ReduceFrameStateUses(node);
+ }
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kLoadField ||
+ node->opcode() == IrOpcode::kLoadElement);
+ if (visited_.Contains(node->id())) return NoChange();
+ visited_.Add(node->id());
+ if (Node* rep = escape_analysis()->GetReplacement(node)) {
+ visited_.Add(node->id());
+ counters()->turbo_escape_loads_replaced()->Increment();
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced #%d (%s) with #%d (%s)\n", node->id(),
+ node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
+ }
+ ReplaceWithValue(node, rep);
+ return Changed(rep);
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceStore(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kStoreField ||
+ node->opcode() == IrOpcode::kStoreElement);
+ if (visited_.Contains(node->id())) return NoChange();
+ visited_.Add(node->id());
+ if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Removed #%d (%s) from effect chain\n", node->id(),
+ node->op()->mnemonic());
+ }
+ RelaxEffectsAndControls(node);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceAllocate(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
+ if (visited_.Contains(node->id())) return NoChange();
+ visited_.Add(node->id());
+ if (escape_analysis()->IsVirtual(node)) {
+ RelaxEffectsAndControls(node);
+ counters()->turbo_escape_allocs_replaced()->Increment();
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Removed allocate #%d from effect chain\n", node->id());
+ }
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceFinishRegion(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
+ Node* effect = NodeProperties::GetEffectInput(node, 0);
+ if (effect->opcode() == IrOpcode::kBeginRegion) {
+ RelaxEffectsAndControls(effect);
+ RelaxEffectsAndControls(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Removed region #%d / #%d from effect chain,", effect->id(),
+ node->id());
+ PrintF(" %d user(s) of #%d remain(s):", node->UseCount(), node->id());
+ for (Edge edge : node->use_edges()) {
+ PrintF(" #%d", edge.from()->id());
+ }
+ PrintF("\n");
+ }
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceReferenceEqual(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kReferenceEqual);
+ Node* left = NodeProperties::GetValueInput(node, 0);
+ Node* right = NodeProperties::GetValueInput(node, 1);
+ if (escape_analysis()->IsVirtual(left)) {
+ if (escape_analysis()->IsVirtual(right) &&
+ escape_analysis()->CompareVirtualObjects(left, right)) {
+ ReplaceWithValue(node, jsgraph()->TrueConstant());
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced ref eq #%d with true\n", node->id());
+      }
+      return Replace(node);
+    }
+ // Right-hand side is not a virtual object, or a different one.
+ ReplaceWithValue(node, jsgraph()->FalseConstant());
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced ref eq #%d with false\n", node->id());
+ }
+ return Replace(node);
+ } else if (escape_analysis()->IsVirtual(right)) {
+ // Left-hand side is not a virtual object.
+ ReplaceWithValue(node, jsgraph()->FalseConstant());
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced ref eq #%d with false\n", node->id());
+ }
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceObjectIsSmi(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kObjectIsSmi);
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ if (escape_analysis()->IsVirtual(input)) {
+ ReplaceWithValue(node, jsgraph()->FalseConstant());
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced ObjectIsSmi #%d with false\n", node->id());
+ }
+ return Replace(node);
+ }
+ return NoChange();
+}
+
+
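+// Rewrites the frame state inputs of an effectful node so that references
+// to virtual objects are replaced by ObjectState nodes describing their
+// field values (see ReduceStateValueInput).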
+Reduction EscapeAnalysisReducer::ReduceFrameStateUses(Node* node) {
+ if (visited_.Contains(node->id())) return NoChange();
+ visited_.Add(node->id());
+ DCHECK_GE(node->op()->EffectInputCount(), 1);
+ bool changed = false;
+ for (int i = 0; i < node->InputCount(); ++i) {
+ Node* input = node->InputAt(i);
+ if (input->opcode() == IrOpcode::kFrameState) {
+ if (Node* ret = ReduceFrameState(input, node, false)) {
+ node->ReplaceInput(i, ret);
+ changed = true;
+ }
+ }
+ }
+ if (changed) {
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+// Returns the clone if it duplicated the node, and null otherwise.
+Node* EscapeAnalysisReducer::ReduceFrameState(Node* node, Node* effect,
+ bool multiple_users) {
+ DCHECK(node->opcode() == IrOpcode::kFrameState);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Reducing FrameState %d\n", node->id());
+ }
+ Node* clone = nullptr;
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ Node* ret =
+ input->opcode() == IrOpcode::kStateValues
+ ? ReduceStateValueInputs(input, effect, node->UseCount() > 1)
+ : ReduceStateValueInput(node, i, effect, node->UseCount() > 1);
+ if (ret) {
+ if (node->UseCount() > 1 || multiple_users) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Cloning #%d", node->id());
+ }
+ node = clone = jsgraph()->graph()->CloneNode(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" to #%d\n", node->id());
+ }
+ multiple_users = false; // Don't clone anymore.
+ }
+ NodeProperties::ReplaceValueInput(node, ret, i);
+ }
+ }
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ if (outer_frame_state->opcode() == IrOpcode::kFrameState) {
+ if (Node* ret =
+ ReduceFrameState(outer_frame_state, effect, node->UseCount() > 1)) {
+ if (node->UseCount() > 1 || multiple_users) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Cloning #%d", node->id());
+ }
+ node = clone = jsgraph()->graph()->CloneNode(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" to #%d\n", node->id());
+ }
+ multiple_users = false;
+ }
+ NodeProperties::ReplaceFrameStateInput(node, 0, ret);
+ }
+ }
+ return clone;
+}
+
+
+// Returns the clone if it duplicated the node, and null otherwise.
+Node* EscapeAnalysisReducer::ReduceStateValueInputs(Node* node, Node* effect,
+ bool multiple_users) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Reducing StateValue #%d\n", node->id());
+ }
+ DCHECK(node->opcode() == IrOpcode::kStateValues);
+ DCHECK_NOT_NULL(effect);
+ Node* clone = nullptr;
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ Node* ret = nullptr;
+ if (input->opcode() == IrOpcode::kStateValues) {
+ ret = ReduceStateValueInputs(input, effect, multiple_users);
+ } else {
+ ret = ReduceStateValueInput(node, i, effect, multiple_users);
+ }
+ if (ret) {
+ node = ret;
+ DCHECK_NULL(clone);
+ clone = ret;
+ multiple_users = false;
+ }
+ }
+ return clone;
+}
+
+
+// Returns the clone if it duplicated the node, and null otherwise.
+Node* EscapeAnalysisReducer::ReduceStateValueInput(Node* node, int node_index,
+ Node* effect,
+ bool multiple_users) {
+ Node* input = NodeProperties::GetValueInput(node, node_index);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Reducing State Input #%d (%s)\n", input->id(),
+ input->op()->mnemonic());
+ }
+ Node* clone = nullptr;
+ if (input->opcode() == IrOpcode::kFinishRegion ||
+ input->opcode() == IrOpcode::kAllocate) {
+ if (escape_analysis()->IsVirtual(input)) {
+ if (Node* object_state =
+ escape_analysis()->GetOrCreateObjectState(effect, input)) {
+ if (node->UseCount() > 1 || multiple_users) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Cloning #%d", node->id());
+ }
+ node = clone = jsgraph()->graph()->CloneNode(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" to #%d\n", node->id());
+ }
+ }
+ NodeProperties::ReplaceValueInput(node, object_state, node_index);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced state #%d input #%d with object state #%d\n",
+ node->id(), input->id(), object_state->id());
+ }
+ } else {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("No object state replacement available.\n");
+ }
+ }
+ }
+ }
+ return clone;
+}
+
+
+Counters* EscapeAnalysisReducer::counters() const {
+ return jsgraph_->isolate()->counters();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/escape-analysis-reducer.h b/src/compiler/escape-analysis-reducer.h
new file mode 100644
index 0000000..1c0da16
--- /dev/null
+++ b/src/compiler/escape-analysis-reducer.h
@@ -0,0 +1,63 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
+#define V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
+
+#include "src/bit-vector.h"
+#include "src/compiler/escape-analysis.h"
+#include "src/compiler/graph-reducer.h"
+
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class Counters;
+
+
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+
+
+class EscapeAnalysisReducer final : public AdvancedReducer {
+ public:
+ EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
+ EscapeAnalysis* escape_analysis, Zone* zone);
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceLoad(Node* node);
+ Reduction ReduceStore(Node* node);
+ Reduction ReduceAllocate(Node* node);
+ Reduction ReduceFinishRegion(Node* node);
+ Reduction ReduceReferenceEqual(Node* node);
+ Reduction ReduceObjectIsSmi(Node* node);
+ Reduction ReduceFrameStateUses(Node* node);
+ Node* ReduceFrameState(Node* node, Node* effect, bool multiple_users);
+ Node* ReduceStateValueInputs(Node* node, Node* effect, bool multiple_users);
+ Node* ReduceStateValueInput(Node* node, int node_index, Node* effect,
+ bool multiple_users);
+
+ JSGraph* jsgraph() const { return jsgraph_; }
+ EscapeAnalysis* escape_analysis() const { return escape_analysis_; }
+ Zone* zone() const { return zone_; }
+ Counters* counters() const;
+
+ JSGraph* const jsgraph_;
+ EscapeAnalysis* escape_analysis_;
+ Zone* const zone_;
+ BitVector visited_;
+
+ DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisReducer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
diff --git a/src/compiler/escape-analysis.cc b/src/compiler/escape-analysis.cc
new file mode 100644
index 0000000..af0ba6a
--- /dev/null
+++ b/src/compiler/escape-analysis.cc
@@ -0,0 +1,1471 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/escape-analysis.h"
+
+#include <limits>
+
+#include "src/base/flags.h"
+#include "src/bootstrapper.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/objects-inl.h"
+#include "src/type-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+const EscapeAnalysis::Alias EscapeAnalysis::kNotReachable =
+ std::numeric_limits<Alias>::max();
+const EscapeAnalysis::Alias EscapeAnalysis::kUntrackable =
+ std::numeric_limits<Alias>::max() - 1;
+
+
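+// A VirtualObject tracks the state of a single (possibly eliminated)
+// allocation: one slot per pointer-sized field, a parallel flag recording
+// whether the field's current value is a phi created by the analysis, and a
+// cached ObjectState node for use in deoptimization frame states.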
+class VirtualObject : public ZoneObject {
+ public:
+ enum Status { kUntracked = 0, kTracked = 1 };
+ VirtualObject(NodeId id, Zone* zone)
+ : id_(id),
+ status_(kUntracked),
+ fields_(zone),
+ phi_(zone),
+ object_state_(nullptr) {}
+
+ VirtualObject(const VirtualObject& other)
+ : id_(other.id_),
+ status_(other.status_),
+ fields_(other.fields_),
+ phi_(other.phi_),
+ object_state_(other.object_state_) {}
+
+ VirtualObject(NodeId id, Zone* zone, size_t field_number)
+ : id_(id),
+ status_(kTracked),
+ fields_(zone),
+ phi_(zone),
+ object_state_(nullptr) {
+ fields_.resize(field_number);
+ phi_.resize(field_number, false);
+ }
+
+ Node* GetField(size_t offset) {
+ if (offset < fields_.size()) {
+ return fields_[offset];
+ }
+ return nullptr;
+ }
+
+ bool IsCreatedPhi(size_t offset) {
+ if (offset < phi_.size()) {
+ return phi_[offset];
+ }
+ return false;
+ }
+
+ bool SetField(size_t offset, Node* node, bool created_phi = false) {
+ bool changed = fields_[offset] != node || phi_[offset] != created_phi;
+ fields_[offset] = node;
+ phi_[offset] = created_phi;
+ if (changed && FLAG_trace_turbo_escape && node) {
+ PrintF("Setting field %zu of #%d to #%d (%s)\n", offset, id(), node->id(),
+ node->op()->mnemonic());
+ }
+ return changed;
+ }
+ bool IsVirtual() const { return status_ == kTracked; }
+ bool IsTracked() const { return status_ != kUntracked; }
+
+ Node** fields_array() { return &fields_.front(); }
+ size_t field_count() { return fields_.size(); }
+ bool ResizeFields(size_t field_count) {
+ if (field_count != fields_.size()) {
+ fields_.resize(field_count);
+ phi_.resize(field_count);
+ return true;
+ }
+ return false;
+ }
+ bool ClearAllFields() {
+ bool changed = false;
+ for (size_t i = 0; i < fields_.size(); ++i) {
+ if (fields_[i] != nullptr) {
+ fields_[i] = nullptr;
+ changed = true;
+ }
+ phi_[i] = false;
+ }
+ return changed;
+ }
+ bool UpdateFrom(const VirtualObject& other);
+ void SetObjectState(Node* node) { object_state_ = node; }
+ Node* GetObjectState() const { return object_state_; }
+
+ NodeId id() const { return id_; }
+ void id(NodeId id) { id_ = id; }
+
+ private:
+ NodeId id_;
+ Status status_;
+ ZoneVector<Node*> fields_;
+ ZoneVector<bool> phi_;
+ Node* object_state_;
+};
+
+
+bool VirtualObject::UpdateFrom(const VirtualObject& other) {
+ bool changed = status_ != other.status_;
+ status_ = other.status_;
+  if (fields_.size() != other.fields_.size()) {
+    fields_ = other.fields_;
+    phi_ = other.phi_;  // Keep the created-phi flags in sync with the fields.
+    return true;
+  }
+ for (size_t i = 0; i < fields_.size(); ++i) {
+ if (fields_[i] != other.fields_[i]) {
+ changed = true;
+ fields_[i] = other.fields_[i];
+ }
+ }
+ return changed;
+}
+
+
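+// A VirtualState is the view of the heap at one node of the effect chain:
+// it maps each alias (the dense id assigned to a tracked allocation) to the
+// VirtualObject describing that allocation at this point.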
+class VirtualState : public ZoneObject {
+ public:
+ VirtualState(Zone* zone, size_t size);
+  VirtualState(const VirtualState& state);
+
+ VirtualObject* VirtualObjectFromAlias(size_t alias);
+ VirtualObject* GetOrCreateTrackedVirtualObject(EscapeAnalysis::Alias alias,
+ NodeId id, Zone* zone);
+ void SetVirtualObject(EscapeAnalysis::Alias alias, VirtualObject* state);
+ void LastChangedAt(Node* node) { last_changed_ = node; }
+ Node* GetLastChanged() { return last_changed_; }
+ bool UpdateFrom(VirtualState* state, Zone* zone);
+ bool MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
+ CommonOperatorBuilder* common, Node* control);
+ size_t size() const { return info_.size(); }
+
+ private:
+ ZoneVector<VirtualObject*> info_;
+ Node* last_changed_;
+};
+
+
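+// Scratch storage that is reused while merging virtual states at effect
+// phis, to avoid reallocating the temporary state/object/field vectors.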
+class MergeCache : public ZoneObject {
+ public:
+ explicit MergeCache(Zone* zone)
+ : states_(zone), objects_(zone), fields_(zone) {
+ states_.reserve(4);
+ objects_.reserve(4);
+ fields_.reserve(4);
+ }
+ ZoneVector<VirtualState*>& states() { return states_; }
+ ZoneVector<VirtualObject*>& objects() { return objects_; }
+ ZoneVector<Node*>& fields() { return fields_; }
+ void Clear() {
+ states_.clear();
+ objects_.clear();
+ fields_.clear();
+ }
+ size_t LoadVirtualObjectsFromStatesFor(EscapeAnalysis::Alias alias);
+ void LoadVirtualObjectsForFieldsFrom(
+ VirtualState* state, const ZoneVector<EscapeAnalysis::Alias>& aliases);
+ Node* GetFields(size_t pos);
+
+ private:
+ ZoneVector<VirtualState*> states_;
+ ZoneVector<VirtualObject*> objects_;
+ ZoneVector<Node*> fields_;
+};
+
+
+size_t MergeCache::LoadVirtualObjectsFromStatesFor(
+ EscapeAnalysis::Alias alias) {
+ objects_.clear();
+ DCHECK_GT(states_.size(), 0u);
+ size_t min = std::numeric_limits<size_t>::max();
+ for (VirtualState* state : states_) {
+ if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
+ objects_.push_back(obj);
+ min = std::min(obj->field_count(), min);
+ }
+ }
+ return min;
+}
+
+
+void MergeCache::LoadVirtualObjectsForFieldsFrom(
+ VirtualState* state, const ZoneVector<EscapeAnalysis::Alias>& aliases) {
+ objects_.clear();
+ size_t max_alias = state->size();
+ for (Node* field : fields_) {
+ EscapeAnalysis::Alias alias = aliases[field->id()];
+ if (alias >= max_alias) continue;
+ if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
+ objects_.push_back(obj);
+ }
+ }
+}
+
+
+Node* MergeCache::GetFields(size_t pos) {
+ fields_.clear();
+ Node* rep = objects_.front()->GetField(pos);
+ for (VirtualObject* obj : objects_) {
+ Node* field = obj->GetField(pos);
+ if (field) {
+ fields_.push_back(field);
+ }
+ if (field != rep) {
+ rep = nullptr;
+ }
+ }
+ return rep;
+}
+
+
+VirtualState::VirtualState(Zone* zone, size_t size)
+ : info_(size, nullptr, zone), last_changed_(nullptr) {}
+
+
+VirtualState::VirtualState(const VirtualState& state)
+ : info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
+ last_changed_(state.last_changed_) {
+ for (size_t i = 0; i < state.info_.size(); ++i) {
+ if (state.info_[i]) {
+ info_[i] =
+ new (info_.get_allocator().zone()) VirtualObject(*state.info_[i]);
+ }
+ }
+}
+
+
+VirtualObject* VirtualState::VirtualObjectFromAlias(size_t alias) {
+ return info_[alias];
+}
+
+
+VirtualObject* VirtualState::GetOrCreateTrackedVirtualObject(
+ EscapeAnalysis::Alias alias, NodeId id, Zone* zone) {
+ if (VirtualObject* obj = VirtualObjectFromAlias(alias)) {
+ return obj;
+ }
+ VirtualObject* obj = new (zone) VirtualObject(id, zone, 0);
+ SetVirtualObject(alias, obj);
+ return obj;
+}
+
+
+void VirtualState::SetVirtualObject(EscapeAnalysis::Alias alias,
+ VirtualObject* obj) {
+ info_[alias] = obj;
+}
+
+
+bool VirtualState::UpdateFrom(VirtualState* from, Zone* zone) {
+ bool changed = false;
+ for (EscapeAnalysis::Alias alias = 0; alias < size(); ++alias) {
+ VirtualObject* ls = VirtualObjectFromAlias(alias);
+ VirtualObject* rs = from->VirtualObjectFromAlias(alias);
+
+ if (rs == nullptr) {
+ continue;
+ }
+
+ if (ls == nullptr) {
+ ls = new (zone) VirtualObject(*rs);
+ SetVirtualObject(alias, ls);
+ changed = true;
+ continue;
+ }
+
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Updating fields of @%d\n", alias);
+ }
+
+ changed = ls->UpdateFrom(*rs) || changed;
+ }
+  return changed;
+}
+
+
+namespace {
+
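+// Two nodes are equivalent phis if they are the same node, or if both are
+// phis whose corresponding value inputs are recursively equivalent.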
+bool IsEquivalentPhi(Node* node1, Node* node2) {
+ if (node1 == node2) return true;
+ if (node1->opcode() != IrOpcode::kPhi || node2->opcode() != IrOpcode::kPhi ||
+ node1->op()->ValueInputCount() != node2->op()->ValueInputCount()) {
+ return false;
+ }
+ for (int i = 0; i < node1->op()->ValueInputCount(); ++i) {
+ Node* input1 = NodeProperties::GetValueInput(node1, i);
+ Node* input2 = NodeProperties::GetValueInput(node2, i);
+ if (!IsEquivalentPhi(input1, input2)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
+ if (phi->opcode() != IrOpcode::kPhi) return false;
+  if (static_cast<size_t>(phi->op()->ValueInputCount()) != inputs.size()) {
+ return false;
+ }
+ for (size_t i = 0; i < inputs.size(); ++i) {
+ Node* input = NodeProperties::GetValueInput(phi, static_cast<int>(i));
+ if (!IsEquivalentPhi(input, inputs[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+
+Node* EscapeAnalysis::GetReplacementIfSame(ZoneVector<VirtualObject*>& objs) {
+ Node* rep = GetReplacement(objs.front()->id());
+ for (VirtualObject* obj : objs) {
+ if (GetReplacement(obj->id()) != rep) {
+ return nullptr;
+ }
+ }
+ return rep;
+}
+
+
+bool VirtualState::MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
+ CommonOperatorBuilder* common, Node* control) {
+ DCHECK_GT(cache->states().size(), 0u);
+ bool changed = false;
+ for (EscapeAnalysis::Alias alias = 0; alias < size(); ++alias) {
+ size_t fields = cache->LoadVirtualObjectsFromStatesFor(alias);
+ if (cache->objects().size() == cache->states().size()) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Merging virtual objects of @%d\n", alias);
+ }
+ VirtualObject* mergeObject = GetOrCreateTrackedVirtualObject(
+ alias, cache->objects().front()->id(), zone);
+ changed = mergeObject->ResizeFields(fields) || changed;
+ for (size_t i = 0; i < fields; ++i) {
+ if (Node* field = cache->GetFields(i)) {
+ changed = mergeObject->SetField(i, field) || changed;
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Field %zu agree on rep #%d\n", i, field->id());
+ }
+ } else {
+ int value_input_count = static_cast<int>(cache->fields().size());
+ if (cache->fields().size() == cache->objects().size()) {
+ Node* rep = mergeObject->GetField(i);
+ if (!rep || !mergeObject->IsCreatedPhi(i)) {
+ cache->fields().push_back(control);
+ Node* phi = graph->NewNode(
+ common->Phi(MachineRepresentation::kTagged,
+ value_input_count),
+ value_input_count + 1, &cache->fields().front());
+ mergeObject->SetField(i, phi, true);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Creating Phi #%d as merge of", phi->id());
+ for (int i = 0; i < value_input_count; i++) {
+ PrintF(" #%d (%s)", cache->fields()[i]->id(),
+ cache->fields()[i]->op()->mnemonic());
+ }
+ PrintF("\n");
+ }
+ changed = true;
+ } else {
+ DCHECK(rep->opcode() == IrOpcode::kPhi);
+ for (int n = 0; n < value_input_count; ++n) {
+ if (n < rep->op()->ValueInputCount()) {
+ Node* old = NodeProperties::GetValueInput(rep, n);
+ if (old != cache->fields()[n]) {
+ changed = true;
+ NodeProperties::ReplaceValueInput(rep, cache->fields()[n],
+ n);
+ }
+ } else {
+ changed = true;
+ rep->InsertInput(graph->zone(), n, cache->fields()[n]);
+ }
+ }
+ if (rep->op()->ValueInputCount() != value_input_count) {
+ if (FLAG_trace_turbo_escape) {
+                  PrintF("  Widening Phi #%d of arity %d to %d\n", rep->id(),
+                         rep->op()->ValueInputCount(), value_input_count);
+ }
+ NodeProperties::ChangeOp(
+ rep, common->Phi(MachineRepresentation::kTagged,
+ value_input_count));
+ }
+ }
+ } else {
+ changed = mergeObject->SetField(i, nullptr) || changed;
+ }
+ }
+ }
+ } else {
+ SetVirtualObject(alias, nullptr);
+ }
+ }
+ return changed;
+}
+
+
+EscapeStatusAnalysis::EscapeStatusAnalysis(EscapeAnalysis* object_analysis,
+ Graph* graph, Zone* zone)
+ : object_analysis_(object_analysis),
+ graph_(graph),
+ zone_(zone),
+ status_(graph->NodeCount(), kUnknown, zone),
+ queue_(zone) {}
+
+
+EscapeStatusAnalysis::~EscapeStatusAnalysis() {}
+
+
+bool EscapeStatusAnalysis::HasEntry(Node* node) {
+ return status_[node->id()] & (kTracked | kEscaped);
+}
+
+
+bool EscapeStatusAnalysis::IsVirtual(Node* node) {
+ return (status_[node->id()] & kTracked) && !(status_[node->id()] & kEscaped);
+}
+
+
+bool EscapeStatusAnalysis::IsEscaped(Node* node) {
+ return status_[node->id()] & kEscaped;
+}
+
+
+bool EscapeStatusAnalysis::IsAllocation(Node* node) {
+ return node->opcode() == IrOpcode::kAllocate ||
+ node->opcode() == IrOpcode::kFinishRegion;
+}
+
+
+bool EscapeStatusAnalysis::SetEscaped(Node* node) {
+ bool changed = !(status_[node->id()] & kEscaped);
+ status_[node->id()] |= kEscaped | kTracked;
+ return changed;
+}
+
+
+void EscapeStatusAnalysis::Resize() {
+ status_.resize(graph()->NodeCount(), kUnknown);
+}
+
+
+size_t EscapeStatusAnalysis::size() { return status_.size(); }
+
+
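+// Processes nodes from a worklist seeded with the end node, walking
+// backwards over the inputs; Process() re-enqueues neighbors whenever an
+// escape status changes, so the loop terminates at a fixed point. The
+// kOnStack and kVisited bits keep duplicates out of the queue.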
+void EscapeStatusAnalysis::Run() {
+ Resize();
+ queue_.push_back(graph()->end());
+ status_[graph()->end()->id()] |= kOnStack;
+ while (!queue_.empty()) {
+ Node* node = queue_.front();
+ queue_.pop_front();
+ status_[node->id()] &= ~kOnStack;
+ Process(node);
+ status_[node->id()] |= kVisited;
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (!(status_[input->id()] & (kVisited | kOnStack))) {
+ queue_.push_back(input);
+ status_[input->id()] |= kOnStack;
+ }
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::RevisitInputs(Node* node) {
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (!(status_[input->id()] & kOnStack)) {
+ queue_.push_back(input);
+ status_[input->id()] |= kOnStack;
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::RevisitUses(Node* node) {
+ for (Edge edge : node->use_edges()) {
+ Node* use = edge.from();
+ if (!(status_[use->id()] & kOnStack)) {
+ queue_.push_back(use);
+ status_[use->id()] |= kOnStack;
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::Process(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate:
+ ProcessAllocate(node);
+ break;
+ case IrOpcode::kFinishRegion:
+ ProcessFinishRegion(node);
+ break;
+ case IrOpcode::kStoreField:
+ ProcessStoreField(node);
+ break;
+ case IrOpcode::kStoreElement:
+ ProcessStoreElement(node);
+ break;
+ case IrOpcode::kLoadField:
+ case IrOpcode::kLoadElement: {
+ if (Node* rep = object_analysis_->GetReplacement(node)) {
+ if (IsAllocation(rep) && CheckUsesForEscape(node, rep)) {
+ RevisitInputs(rep);
+ RevisitUses(rep);
+ }
+ }
+ break;
+ }
+ case IrOpcode::kPhi:
+ if (!HasEntry(node)) {
+ status_[node->id()] |= kTracked;
+ if (!IsAllocationPhi(node)) {
+ SetEscaped(node);
+ RevisitUses(node);
+ }
+ }
+      CheckUsesForEscape(node);
+      break;
+ default:
+ break;
+ }
+}
+
+
+bool EscapeStatusAnalysis::IsAllocationPhi(Node* node) {
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (input->opcode() == IrOpcode::kPhi && !IsEscaped(input)) continue;
+ if (IsAllocation(input)) continue;
+ return false;
+ }
+ return true;
+}
+
+
+void EscapeStatusAnalysis::ProcessStoreField(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
+ Node* to = NodeProperties::GetValueInput(node, 0);
+ Node* val = NodeProperties::GetValueInput(node, 1);
+ if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
+ RevisitUses(val);
+ RevisitInputs(val);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Setting #%d (%s) to escaped because of store to field of #%d\n",
+ val->id(), val->op()->mnemonic(), to->id());
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::ProcessStoreElement(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
+ Node* to = NodeProperties::GetValueInput(node, 0);
+ Node* val = NodeProperties::GetValueInput(node, 2);
+ if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
+ RevisitUses(val);
+ RevisitInputs(val);
+ if (FLAG_trace_turbo_escape) {
+      PrintF("Setting #%d (%s) to escaped because of store to element of #%d\n",
+ val->id(), val->op()->mnemonic(), to->id());
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::ProcessAllocate(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
+ if (!HasEntry(node)) {
+ status_[node->id()] |= kTracked;
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Created status entry for node #%d (%s)\n", node->id(),
+ node->op()->mnemonic());
+ }
+ NumberMatcher size(node->InputAt(0));
+ DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
+ if (!size.HasValue() && SetEscaped(node)) {
+ RevisitUses(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Setting #%d to escaped because of non-const alloc\n",
+ node->id());
+ }
+ // This node is known to escape, uses do not have to be checked.
+ return;
+ }
+ }
+ if (CheckUsesForEscape(node, true)) {
+ RevisitUses(node);
+ }
+}
+
+
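+// Scans the value (and context) uses of |uses| and marks |rep| as escaping
+// if one of them demands it. Returns true if the status of |rep| changed,
+// in which case the caller should revisit the affected nodes.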
+bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
+ bool phi_escaping) {
+ for (Edge edge : uses->use_edges()) {
+ Node* use = edge.from();
+ if (edge.index() >= use->op()->ValueInputCount() +
+ OperatorProperties::GetContextInputCount(use->op()))
+ continue;
+ switch (use->opcode()) {
+ case IrOpcode::kPhi:
+ if (phi_escaping && SetEscaped(rep)) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(
+ "Setting #%d (%s) to escaped because of use by phi node "
+ "#%d (%s)\n",
+ rep->id(), rep->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
+ }
+ return true;
+ }
+ // Fallthrough.
+ case IrOpcode::kStoreField:
+ case IrOpcode::kLoadField:
+ case IrOpcode::kStoreElement:
+ case IrOpcode::kLoadElement:
+ case IrOpcode::kFrameState:
+ case IrOpcode::kStateValues:
+ case IrOpcode::kReferenceEqual:
+ case IrOpcode::kFinishRegion:
+ if (IsEscaped(use) && SetEscaped(rep)) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(
+ "Setting #%d (%s) to escaped because of use by escaping node "
+ "#%d (%s)\n",
+ rep->id(), rep->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
+ }
+ return true;
+ }
+ break;
+      case IrOpcode::kObjectIsSmi:
+        if (!IsAllocation(rep) && SetEscaped(rep)) {
+          if (FLAG_trace_turbo_escape) {
+            PrintF("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+                   rep->id(), rep->op()->mnemonic(), use->id(),
+                   use->op()->mnemonic());
+          }
+          return true;
+        }
+        break;
+ default:
+ if (use->op()->EffectInputCount() == 0 &&
+ uses->op()->EffectInputCount() > 0) {
+ PrintF("Encountered unaccounted use by #%d (%s)\n", use->id(),
+ use->op()->mnemonic());
+ UNREACHABLE();
+ }
+ if (SetEscaped(rep)) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+ rep->id(), rep->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+
+void EscapeStatusAnalysis::ProcessFinishRegion(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
+ if (!HasEntry(node)) {
+ status_[node->id()] |= kTracked;
+ RevisitUses(node);
+ }
+ if (CheckUsesForEscape(node, true)) {
+ RevisitInputs(node);
+ }
+}
+
+
+void EscapeStatusAnalysis::DebugPrint() {
+ for (NodeId id = 0; id < status_.size(); id++) {
+ if (status_[id] & kTracked) {
+ PrintF("Node #%d is %s\n", id,
+ (status_[id] & kEscaped) ? "escaping" : "virtual");
+ }
+ }
+}
+
+
+EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
+ Zone* zone)
+ : graph_(graph),
+ common_(common),
+ zone_(zone),
+ virtual_states_(zone),
+ replacements_(zone),
+ escape_status_(this, graph, zone),
+ cache_(new (zone) MergeCache(zone)),
+ aliases_(zone),
+ next_free_alias_(0) {}
+
+
+EscapeAnalysis::~EscapeAnalysis() {}
+
+
+void EscapeAnalysis::Run() {
+ replacements_.resize(graph()->NodeCount());
+ AssignAliases();
+ RunObjectAnalysis();
+ escape_status_.Run();
+}
+
+
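+// Walks the graph from the end node and assigns a dense alias number to
+// every allocation; a FinishRegion reuses the alias of the allocation it
+// wraps, if any. All other reachable nodes are marked untrackable.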
+void EscapeAnalysis::AssignAliases() {
+ ZoneVector<Node*> stack(zone());
+ stack.push_back(graph()->end());
+ CHECK_LT(graph()->NodeCount(), kUntrackable);
+ aliases_.resize(graph()->NodeCount(), kNotReachable);
+ aliases_[graph()->end()->id()] = kUntrackable;
+ while (!stack.empty()) {
+ Node* node = stack.back();
+ stack.pop_back();
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate:
+ if (aliases_[node->id()] >= kUntrackable) {
+ aliases_[node->id()] = NextAlias();
+ }
+ break;
+ case IrOpcode::kFinishRegion: {
+ Node* allocate = NodeProperties::GetValueInput(node, 0);
+ if (allocate->opcode() == IrOpcode::kAllocate) {
+ if (aliases_[allocate->id()] >= kUntrackable) {
+ if (aliases_[allocate->id()] == kNotReachable) {
+ stack.push_back(allocate);
+ }
+ aliases_[allocate->id()] = NextAlias();
+ }
+ aliases_[node->id()] = aliases_[allocate->id()];
+ } else {
+ aliases_[node->id()] = NextAlias();
+ }
+ break;
+ }
+ default:
+ DCHECK_EQ(aliases_[node->id()], kUntrackable);
+ break;
+ }
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (aliases_[input->id()] == kNotReachable) {
+ stack.push_back(input);
+ aliases_[input->id()] = kUntrackable;
+ }
+ }
+ }
+
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Discovered trackable nodes");
+    for (EscapeAnalysis::Alias id = 0; id < graph()->NodeCount(); ++id) {
+      if (aliases_[id] < kUntrackable) {
+        PrintF(" #%u", id);
+      }
+    }
+ PrintF("\n");
+ }
+}
+
+
+void EscapeAnalysis::RunObjectAnalysis() {
+ virtual_states_.resize(graph()->NodeCount());
+ ZoneVector<Node*> stack(zone());
+ stack.push_back(graph()->start());
+ while (!stack.empty()) {
+ Node* node = stack.back();
+ stack.pop_back();
+ if (aliases_[node->id()] != kNotReachable && Process(node)) {
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ Node* use = edge.from();
+ if ((use->opcode() != IrOpcode::kLoadField &&
+ use->opcode() != IrOpcode::kLoadElement) ||
+ !IsDanglingEffectNode(use)) {
+ stack.push_back(use);
+ }
+ }
+ }
+      // Push load uses last so they are popped first (the stack is LIFO):
+      // dangling loads are a problem otherwise.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ Node* use = edge.from();
+ if ((use->opcode() == IrOpcode::kLoadField ||
+ use->opcode() == IrOpcode::kLoadElement) &&
+ IsDanglingEffectNode(use)) {
+ stack.push_back(use);
+ }
+ }
+ }
+ }
+ }
+ if (FLAG_trace_turbo_escape) {
+ DebugPrint();
+ }
+}
+
+
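+// A node is a dangling effect node if it takes and produces effects but no
+// use consumes its effect output.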
+bool EscapeAnalysis::IsDanglingEffectNode(Node* node) {
+ if (node->op()->EffectInputCount() == 0) return false;
+ if (node->op()->EffectOutputCount() == 0) return false;
+ if (node->op()->EffectInputCount() == 1 &&
+ NodeProperties::GetEffectInput(node)->opcode() == IrOpcode::kStart) {
+ // The start node is used as sentinel for nodes that are in general
+ // effectful, but of which an analysis has determined that they do not
+ // produce effects in this instance. We don't consider these nodes dangling.
+ return false;
+ }
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+bool EscapeAnalysis::Process(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate:
+ ProcessAllocation(node);
+ break;
+ case IrOpcode::kBeginRegion:
+ ForwardVirtualState(node);
+ break;
+ case IrOpcode::kFinishRegion:
+ ProcessFinishRegion(node);
+ break;
+ case IrOpcode::kStoreField:
+ ProcessStoreField(node);
+ break;
+ case IrOpcode::kLoadField:
+ ProcessLoadField(node);
+ break;
+ case IrOpcode::kStoreElement:
+ ProcessStoreElement(node);
+ break;
+ case IrOpcode::kLoadElement:
+ ProcessLoadElement(node);
+ break;
+ case IrOpcode::kStart:
+ ProcessStart(node);
+ break;
+ case IrOpcode::kEffectPhi:
+ return ProcessEffectPhi(node);
+ default:
+ if (node->op()->EffectInputCount() > 0) {
+ ForwardVirtualState(node);
+ }
+ ProcessAllocationUsers(node);
+ break;
+ }
+ return true;
+}
+
+
+void EscapeAnalysis::ProcessAllocationUsers(Node* node) {
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (!NodeProperties::IsValueEdge(edge) &&
+ !NodeProperties::IsContextEdge(edge))
+ continue;
+ switch (node->opcode()) {
+ case IrOpcode::kStoreField:
+ case IrOpcode::kLoadField:
+ case IrOpcode::kStoreElement:
+ case IrOpcode::kLoadElement:
+ case IrOpcode::kFrameState:
+ case IrOpcode::kStateValues:
+ case IrOpcode::kReferenceEqual:
+ case IrOpcode::kFinishRegion:
+ case IrOpcode::kPhi:
+ break;
+ default:
+ VirtualState* state = virtual_states_[node->id()];
+ if (VirtualObject* obj = ResolveVirtualObject(state, input)) {
+ if (obj->ClearAllFields()) {
+ state->LastChangedAt(node);
+ }
+ }
+ break;
+ }
+ }
+}
+
+
+bool EscapeAnalysis::IsEffectBranchPoint(Node* node) {
+ int count = 0;
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ if (++count > 1) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+
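+// Propagates the virtual state along the incoming effect edge. At effect
+// branch points the predecessor's state is copied, so sibling effect chains
+// cannot observe each other's updates; otherwise the state is shared.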
+void EscapeAnalysis::ForwardVirtualState(Node* node) {
+ DCHECK_EQ(node->op()->EffectInputCount(), 1);
+ if (node->opcode() != IrOpcode::kLoadField &&
+ node->opcode() != IrOpcode::kLoadElement &&
+ node->opcode() != IrOpcode::kLoad && IsDanglingEffectNode(node)) {
+    PrintF("Dangling effect node: #%d (%s)\n", node->id(),
+ node->op()->mnemonic());
+ UNREACHABLE();
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Break the cycle for effect phis.
+ if (effect->opcode() == IrOpcode::kEffectPhi) {
+ if (virtual_states_[effect->id()] == nullptr) {
+ virtual_states_[effect->id()] =
+ new (zone()) VirtualState(zone(), AliasCount());
+ }
+ }
+ DCHECK_NOT_NULL(virtual_states_[effect->id()]);
+ if (IsEffectBranchPoint(effect)) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Copying object state %p from #%d (%s) to #%d (%s)\n",
+ static_cast<void*>(virtual_states_[effect->id()]), effect->id(),
+ effect->op()->mnemonic(), node->id(), node->op()->mnemonic());
+ }
+ if (!virtual_states_[node->id()]) {
+ virtual_states_[node->id()] =
+ new (zone()) VirtualState(*virtual_states_[effect->id()]);
+ } else {
+ virtual_states_[node->id()]->UpdateFrom(virtual_states_[effect->id()],
+ zone());
+ }
+ } else {
+ virtual_states_[node->id()] = virtual_states_[effect->id()];
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Forwarding object state %p from #%d (%s) to #%d (%s)\n",
+ static_cast<void*>(virtual_states_[effect->id()]), effect->id(),
+ effect->op()->mnemonic(), node->id(), node->op()->mnemonic());
+ }
+ }
+}
+
+
+void EscapeAnalysis::ProcessStart(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStart);
+ virtual_states_[node->id()] = new (zone()) VirtualState(zone(), AliasCount());
+}
+
+
+bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
+ bool changed = false;
+
+ VirtualState* mergeState = virtual_states_[node->id()];
+ if (!mergeState) {
+ mergeState = new (zone()) VirtualState(zone(), AliasCount());
+ virtual_states_[node->id()] = mergeState;
+ changed = true;
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Effect Phi #%d got new states map %p.\n", node->id(),
+ static_cast<void*>(mergeState));
+ }
+ } else if (mergeState->GetLastChanged() != node) {
+ changed = true;
+ }
+
+ cache_->Clear();
+
+ if (FLAG_trace_turbo_escape) {
+ PrintF("At Effect Phi #%d, merging states into %p:", node->id(),
+ static_cast<void*>(mergeState));
+ }
+
+ for (int i = 0; i < node->op()->EffectInputCount(); ++i) {
+ Node* input = NodeProperties::GetEffectInput(node, i);
+ VirtualState* state = virtual_states_[input->id()];
+ if (state) {
+ cache_->states().push_back(state);
+ }
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" %p (from %d %s)", static_cast<void*>(state), input->id(),
+ input->op()->mnemonic());
+ }
+ }
+ if (FLAG_trace_turbo_escape) {
+ PrintF("\n");
+ }
+
+ if (cache_->states().size() == 0) {
+ return changed;
+ }
+
+ changed = mergeState->MergeFrom(cache_, zone(), graph(), common(),
+ NodeProperties::GetControlInput(node)) ||
+ changed;
+
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Merge %s the node.\n", changed ? "changed" : "did not change");
+ }
+
+ if (changed) {
+ mergeState->LastChangedAt(node);
+ escape_status_.Resize();
+ }
+ return changed;
+}
+
+
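+// Registers a virtual object for the allocation. If the allocation size is
+// a compile-time constant, the object is tracked with one slot per
+// pointer-sized field; otherwise it is recorded as untracked.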
+void EscapeAnalysis::ProcessAllocation(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
+ ForwardVirtualState(node);
+
+ // Check if we have already processed this node.
+ if (virtual_states_[node->id()]->VirtualObjectFromAlias(
+ aliases_[node->id()])) {
+ return;
+ }
+
+ NumberMatcher size(node->InputAt(0));
+ DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
+ if (size.HasValue()) {
+ virtual_states_[node->id()]->SetVirtualObject(
+ aliases_[node->id()],
+ new (zone())
+ VirtualObject(node->id(), zone(), size.Value() / kPointerSize));
+ } else {
+ virtual_states_[node->id()]->SetVirtualObject(
+ aliases_[node->id()], new (zone()) VirtualObject(node->id(), zone()));
+ }
+ virtual_states_[node->id()]->LastChangedAt(node);
+}
+
+
+void EscapeAnalysis::ProcessFinishRegion(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
+ ForwardVirtualState(node);
+ Node* allocation = NodeProperties::GetValueInput(node, 0);
+ if (allocation->opcode() == IrOpcode::kAllocate) {
+ VirtualState* state = virtual_states_[node->id()];
+ if (!state->VirtualObjectFromAlias(aliases_[node->id()])) {
+ VirtualObject* vobj_alloc =
+ state->VirtualObjectFromAlias(aliases_[allocation->id()]);
+ DCHECK_NOT_NULL(vobj_alloc);
+ state->SetVirtualObject(aliases_[node->id()], vobj_alloc);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Linked finish region node #%d to node #%d\n", node->id(),
+ allocation->id());
+ }
+ state->LastChangedAt(node);
+ }
+ }
+}
+
+
+Node* EscapeAnalysis::replacement(NodeId id) {
+ if (id >= replacements_.size()) return nullptr;
+ return replacements_[id];
+}
+
+
+Node* EscapeAnalysis::replacement(Node* node) {
+ return replacement(node->id());
+}
+
+
+bool EscapeAnalysis::SetReplacement(Node* node, Node* rep) {
+ bool changed = replacements_[node->id()] != rep;
+ replacements_[node->id()] = rep;
+ return changed;
+}
+
+
+bool EscapeAnalysis::UpdateReplacement(VirtualState* state, Node* node,
+ Node* rep) {
+ if (SetReplacement(node, rep)) {
+ state->LastChangedAt(node);
+ if (FLAG_trace_turbo_escape) {
+ if (rep) {
+ PrintF("Replacement of #%d is #%d (%s)\n", node->id(), rep->id(),
+ rep->op()->mnemonic());
+ } else {
+ PrintF("Replacement of #%d cleared\n", node->id());
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+
+Node* EscapeAnalysis::ResolveReplacement(Node* node) {
+ while (replacement(node)) {
+ node = replacement(node);
+ }
+ return node;
+}
+
+
+Node* EscapeAnalysis::GetReplacement(Node* node) {
+ return GetReplacement(node->id());
+}
+
+
+Node* EscapeAnalysis::GetReplacement(NodeId id) {
+ Node* node = nullptr;
+ while (replacement(id)) {
+ node = replacement(id);
+ id = node->id();
+ }
+ return node;
+}
+
+
+bool EscapeAnalysis::IsVirtual(Node* node) {
+ if (node->id() >= escape_status_.size()) {
+ return false;
+ }
+ return escape_status_.IsVirtual(node);
+}
+
+
+bool EscapeAnalysis::IsEscaped(Node* node) {
+ if (node->id() >= escape_status_.size()) {
+ return false;
+ }
+ return escape_status_.IsEscaped(node);
+}
+
+
+bool EscapeAnalysis::SetEscaped(Node* node) {
+ return escape_status_.SetEscaped(node);
+}
+
+
+VirtualObject* EscapeAnalysis::GetVirtualObject(Node* at, NodeId id) {
+ if (VirtualState* states = virtual_states_[at->id()]) {
+ return states->VirtualObjectFromAlias(aliases_[id]);
+ }
+ return nullptr;
+}
+
+
+VirtualObject* EscapeAnalysis::ResolveVirtualObject(VirtualState* state,
+ Node* node) {
+ VirtualObject* obj = GetVirtualObject(state, ResolveReplacement(node));
+ while (obj && replacement(obj->id())) {
+ if (VirtualObject* next = GetVirtualObject(state, replacement(obj->id()))) {
+ obj = next;
+ } else {
+ break;
+ }
+ }
+ return obj;
+}
+
+
+bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
+ DCHECK(IsVirtual(left) && IsVirtual(right));
+ left = ResolveReplacement(left);
+ right = ResolveReplacement(right);
+ if (IsEquivalentPhi(left, right)) {
+ return true;
+ }
+ return false;
+}
+
+
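+// Converts the byte offset of a field access into an index into the
+// pointer-sized field array of a virtual object.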
+int EscapeAnalysis::OffsetFromAccess(Node* node) {
+ DCHECK(OpParameter<FieldAccess>(node).offset % kPointerSize == 0);
+ return OpParameter<FieldAccess>(node).offset / kPointerSize;
+}
+
+
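+// Handles a load whose object input is a phi: if every input of the phi
+// maps to a tracked virtual object, the load is replaced by a phi over the
+// corresponding field values.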
+void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* node,
+ VirtualState* state) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Load #%d from phi #%d", node->id(), from->id());
+ }
+
+  cache_->fields().clear();
+  // Collect the phi's operands; their virtual objects provide the field
+  // values that get merged below.
+  for (int i = 0; i < from->op()->ValueInputCount(); ++i) {
+    Node* input = NodeProperties::GetValueInput(from, i);
+    cache_->fields().push_back(input);
+  }
+
+ cache_->LoadVirtualObjectsForFieldsFrom(state, aliases_);
+ if (cache_->objects().size() == cache_->fields().size()) {
+ cache_->GetFields(offset);
+ if (cache_->fields().size() == cache_->objects().size()) {
+ Node* rep = replacement(node);
+ if (!rep || !IsEquivalentPhi(rep, cache_->fields())) {
+ int value_input_count = static_cast<int>(cache_->fields().size());
+ cache_->fields().push_back(NodeProperties::GetControlInput(from));
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, value_input_count),
+ value_input_count + 1, &cache_->fields().front());
+ escape_status_.Resize();
+ SetReplacement(node, phi);
+ state->LastChangedAt(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" got phi created.\n");
+ }
+ } else if (FLAG_trace_turbo_escape) {
+ PrintF(" has already phi #%d.\n", rep->id());
+ }
+ } else if (FLAG_trace_turbo_escape) {
+ PrintF(" has incomplete field info.\n");
+ }
+ } else if (FLAG_trace_turbo_escape) {
+ PrintF(" has incomplete virtual object info.\n");
+ }
+}
+
+
+void EscapeAnalysis::ProcessLoadField(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kLoadField);
+ ForwardVirtualState(node);
+ Node* from = NodeProperties::GetValueInput(node, 0);
+ VirtualState* state = virtual_states_[node->id()];
+ if (VirtualObject* object = ResolveVirtualObject(state, from)) {
+ int offset = OffsetFromAccess(node);
+ if (!object->IsTracked()) return;
+ Node* value = object->GetField(offset);
+ if (value) {
+ value = ResolveReplacement(value);
+ }
+ // Record that the load has this alias.
+ UpdateReplacement(state, node, value);
+ } else {
+ if (from->opcode() == IrOpcode::kPhi &&
+ OpParameter<FieldAccess>(node).offset % kPointerSize == 0) {
+ int offset = OffsetFromAccess(node);
+ // Only binary phis are supported for now.
+ ProcessLoadFromPhi(offset, from, node, state);
+ }
+ }
+}
+
+
+void EscapeAnalysis::ProcessLoadElement(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kLoadElement);
+ ForwardVirtualState(node);
+ Node* from = NodeProperties::GetValueInput(node, 0);
+ VirtualState* state = virtual_states_[node->id()];
+ Node* index_node = node->InputAt(1);
+ NumberMatcher index(index_node);
+ DCHECK(index_node->opcode() != IrOpcode::kInt32Constant &&
+ index_node->opcode() != IrOpcode::kInt64Constant &&
+ index_node->opcode() != IrOpcode::kFloat32Constant &&
+ index_node->opcode() != IrOpcode::kFloat64Constant);
+ ElementAccess access = OpParameter<ElementAccess>(node);
+ if (index.HasValue()) {
+ int offset = index.Value() + access.header_size / kPointerSize;
+ if (VirtualObject* object = ResolveVirtualObject(state, from)) {
+ CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
+ kPointerSizeLog2);
+ CHECK_EQ(access.header_size % kPointerSize, 0);
+
+ if (!object->IsTracked()) return;
+ Node* value = object->GetField(offset);
+ if (value) {
+ value = ResolveReplacement(value);
+ }
+ // Record that the load has this alias.
+ UpdateReplacement(state, node, value);
+ } else if (from->opcode() == IrOpcode::kPhi) {
+ ElementAccess access = OpParameter<ElementAccess>(node);
+ int offset = index.Value() + access.header_size / kPointerSize;
+ ProcessLoadFromPhi(offset, from, node, state);
+ }
+ } else {
+ // We have a load from a non-const index, cannot eliminate object.
+ if (SetEscaped(from)) {
+ if (FLAG_trace_turbo_escape) {
+        PrintF(
+            "Setting #%d (%s) to escaped because load element #%d from "
+            "non-const index #%d (%s)\n",
+ from->id(), from->op()->mnemonic(), node->id(), index_node->id(),
+ index_node->op()->mnemonic());
+ }
+ }
+ }
+}
+
+
+void EscapeAnalysis::ProcessStoreField(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
+ ForwardVirtualState(node);
+ Node* to = NodeProperties::GetValueInput(node, 0);
+ Node* val = NodeProperties::GetValueInput(node, 1);
+ VirtualState* state = virtual_states_[node->id()];
+ if (VirtualObject* obj = ResolveVirtualObject(state, to)) {
+ if (!obj->IsTracked()) return;
+ int offset = OffsetFromAccess(node);
+ if (obj->SetField(offset, ResolveReplacement(val))) {
+ state->LastChangedAt(node);
+ }
+ }
+}
+
+
+void EscapeAnalysis::ProcessStoreElement(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
+ ForwardVirtualState(node);
+ Node* to = NodeProperties::GetValueInput(node, 0);
+ Node* index_node = node->InputAt(1);
+ NumberMatcher index(index_node);
+ DCHECK(index_node->opcode() != IrOpcode::kInt32Constant &&
+ index_node->opcode() != IrOpcode::kInt64Constant &&
+ index_node->opcode() != IrOpcode::kFloat32Constant &&
+ index_node->opcode() != IrOpcode::kFloat64Constant);
+ ElementAccess access = OpParameter<ElementAccess>(node);
+ Node* val = NodeProperties::GetValueInput(node, 2);
+ if (index.HasValue()) {
+ int offset = index.Value() + access.header_size / kPointerSize;
+ VirtualState* states = virtual_states_[node->id()];
+ if (VirtualObject* obj = ResolveVirtualObject(states, to)) {
+ if (!obj->IsTracked()) return;
+ CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
+ kPointerSizeLog2);
+ CHECK_EQ(access.header_size % kPointerSize, 0);
+ if (obj->SetField(offset, ResolveReplacement(val))) {
+ states->LastChangedAt(node);
+ }
+ }
+ } else {
+ // We have a store to a non-const index, cannot eliminate object.
+ if (SetEscaped(to)) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(
+ "Setting #%d (%s) to escaped because store element #%d to "
+ "non-const "
+ "index #%d (%s)\n",
+ to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
+ index_node->op()->mnemonic());
+ }
+ }
+ }
+}
+
+
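+// Returns the ObjectState node capturing the current field values of a
+// virtual allocation, creating it on demand. Fields that are themselves
+// virtual are recursively replaced by their own ObjectState nodes.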
+Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
+ if ((node->opcode() == IrOpcode::kFinishRegion ||
+ node->opcode() == IrOpcode::kAllocate) &&
+ IsVirtual(node)) {
+ if (VirtualObject* vobj =
+ ResolveVirtualObject(virtual_states_[effect->id()], node)) {
+ if (Node* object_state = vobj->GetObjectState()) {
+ return object_state;
+ } else {
+ cache_->fields().clear();
+ for (size_t i = 0; i < vobj->field_count(); ++i) {
+ if (Node* field = vobj->GetField(i)) {
+ cache_->fields().push_back(field);
+ }
+ }
+ int input_count = static_cast<int>(cache_->fields().size());
+ Node* new_object_state =
+ graph()->NewNode(common()->ObjectState(input_count, vobj->id()),
+ input_count, &cache_->fields().front());
+ vobj->SetObjectState(new_object_state);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(
+ "Creating object state #%d for vobj %p (from node #%d) at effect "
+ "#%d\n",
+ new_object_state->id(), static_cast<void*>(vobj), node->id(),
+ effect->id());
+ }
+ // Now fix uses of other objects.
+ for (size_t i = 0; i < vobj->field_count(); ++i) {
+ if (Node* field = vobj->GetField(i)) {
+ if (Node* field_object_state =
+ GetOrCreateObjectState(effect, field)) {
+ NodeProperties::ReplaceValueInput(
+ new_object_state, field_object_state, static_cast<int>(i));
+ }
+ }
+ }
+ return new_object_state;
+ }
+ }
+ }
+ return nullptr;
+}
+
+
+void EscapeAnalysis::DebugPrintObject(VirtualObject* object, Alias alias) {
+ PrintF(" Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
+ object->field_count());
+ for (size_t i = 0; i < object->field_count(); ++i) {
+ if (Node* f = object->GetField(i)) {
+ PrintF(" Field %zu = #%d (%s)\n", i, f->id(), f->op()->mnemonic());
+ }
+ }
+}
+
+
+void EscapeAnalysis::DebugPrintState(VirtualState* state) {
+ PrintF("Dumping object state %p\n", static_cast<void*>(state));
+ for (Alias alias = 0; alias < AliasCount(); ++alias) {
+ if (VirtualObject* object = state->VirtualObjectFromAlias(alias)) {
+ DebugPrintObject(object, alias);
+ }
+ }
+}
+
+
+void EscapeAnalysis::DebugPrint() {
+ ZoneVector<VirtualState*> object_states(zone());
+ for (NodeId id = 0; id < virtual_states_.size(); id++) {
+ if (VirtualState* states = virtual_states_[id]) {
+ if (std::find(object_states.begin(), object_states.end(), states) ==
+ object_states.end()) {
+ object_states.push_back(states);
+ }
+ }
+ }
+ for (size_t n = 0; n < object_states.size(); n++) {
+ DebugPrintState(object_states[n]);
+ }
+}
+
+
+VirtualObject* EscapeAnalysis::GetVirtualObject(VirtualState* state,
+ Node* node) {
+ if (node->id() >= aliases_.size()) return nullptr;
+ Alias alias = aliases_[node->id()];
+ if (alias >= state->size()) return nullptr;
+ return state->VirtualObjectFromAlias(alias);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/escape-analysis.h b/src/compiler/escape-analysis.h
new file mode 100644
index 0000000..ea7b11e
--- /dev/null
+++ b/src/compiler/escape-analysis.h
@@ -0,0 +1,169 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ESCAPE_ANALYSIS_H_
+#define V8_COMPILER_ESCAPE_ANALYSIS_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class EscapeAnalysis;
+class VirtualState;
+class VirtualObject;
+
+
+// EscapeStatusAnalysis determines for each allocation whether it escapes.
+class EscapeStatusAnalysis {
+ public:
+ ~EscapeStatusAnalysis();
+
+ enum EscapeStatusFlag {
+ kUnknown = 0u,
+ kTracked = 1u << 0,
+ kEscaped = 1u << 1,
+ kOnStack = 1u << 2,
+ kVisited = 1u << 3,
+ };
+ typedef base::Flags<EscapeStatusFlag, unsigned char> EscapeStatusFlags;
+
+ void Run();
+
+ bool IsVirtual(Node* node);
+ bool IsEscaped(Node* node);
+ bool IsAllocation(Node* node);
+
+ void DebugPrint();
+
+ friend class EscapeAnalysis;
+
+ private:
+ EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
+ Zone* zone);
+ void Process(Node* node);
+ void ProcessAllocate(Node* node);
+ void ProcessFinishRegion(Node* node);
+ void ProcessStoreField(Node* node);
+ void ProcessStoreElement(Node* node);
+ bool CheckUsesForEscape(Node* node, bool phi_escaping = false) {
+ return CheckUsesForEscape(node, node, phi_escaping);
+ }
+ bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
+ void RevisitUses(Node* node);
+ void RevisitInputs(Node* node);
+ bool SetEscaped(Node* node);
+ bool HasEntry(Node* node);
+ void Resize();
+ size_t size();
+ bool IsAllocationPhi(Node* node);
+
+ Graph* graph() const { return graph_; }
+ Zone* zone() const { return zone_; }
+
+ EscapeAnalysis* object_analysis_;
+ Graph* const graph_;
+ Zone* const zone_;
+ ZoneVector<EscapeStatusFlags> status_;
+ ZoneDeque<Node*> queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
+};
+
+
+DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::EscapeStatusFlags)
+
+
+// Forward Declaration.
+class MergeCache;
+
+
+// EscapeAnalysis simulates stores to determine the value of loads in case
+// an object is virtual and can be eliminated.
+class EscapeAnalysis {
+ public:
+ typedef NodeId Alias;
+
+ EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
+ ~EscapeAnalysis();
+
+ void Run();
+
+ Node* GetReplacement(Node* node);
+ bool IsVirtual(Node* node);
+ bool IsEscaped(Node* node);
+ bool CompareVirtualObjects(Node* left, Node* right);
+ Node* GetOrCreateObjectState(Node* effect, Node* node);
+
+ private:
+ void RunObjectAnalysis();
+ void AssignAliases();
+ bool Process(Node* node);
+ void ProcessLoadField(Node* node);
+ void ProcessStoreField(Node* node);
+ void ProcessLoadElement(Node* node);
+ void ProcessStoreElement(Node* node);
+ void ProcessAllocationUsers(Node* node);
+ void ProcessAllocation(Node* node);
+ void ProcessFinishRegion(Node* node);
+ void ProcessCall(Node* node);
+ void ProcessStart(Node* node);
+ bool ProcessEffectPhi(Node* node);
+ void ProcessLoadFromPhi(int offset, Node* from, Node* node,
+ VirtualState* states);
+
+ void ForwardVirtualState(Node* node);
+ bool IsEffectBranchPoint(Node* node);
+ bool IsDanglingEffectNode(Node* node);
+ int OffsetFromAccess(Node* node);
+
+ VirtualObject* GetVirtualObject(Node* at, NodeId id);
+ VirtualObject* ResolveVirtualObject(VirtualState* state, Node* node);
+ Node* GetReplacementIfSame(ZoneVector<VirtualObject*>& objs);
+
+ bool SetEscaped(Node* node);
+ Node* replacement(NodeId id);
+ Node* replacement(Node* node);
+ Node* ResolveReplacement(Node* node);
+ Node* GetReplacement(NodeId id);
+ bool SetReplacement(Node* node, Node* rep);
+ bool UpdateReplacement(VirtualState* state, Node* node, Node* rep);
+
+ VirtualObject* GetVirtualObject(VirtualState* state, Node* node);
+
+ void DebugPrint();
+ void DebugPrintState(VirtualState* state);
+  void DebugPrintObject(VirtualObject* object, Alias alias);
+
+ Alias NextAlias() { return next_free_alias_++; }
+ Alias AliasCount() const { return next_free_alias_; }
+
+ Graph* graph() const { return graph_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ Zone* zone() const { return zone_; }
+
+ static const Alias kNotReachable;
+ static const Alias kUntrackable;
+ Graph* const graph_;
+ CommonOperatorBuilder* const common_;
+ Zone* const zone_;
+ ZoneVector<VirtualState*> virtual_states_;
+ ZoneVector<Node*> replacements_;
+ EscapeStatusAnalysis escape_status_;
+ MergeCache* cache_;
+ ZoneVector<Alias> aliases_;
+ Alias next_free_alias_;
+
+ DISALLOW_COPY_AND_ASSIGN(EscapeAnalysis);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ESCAPE_ANALYSIS_H_
diff --git a/src/compiler/fast-accessor-assembler.cc b/src/compiler/fast-accessor-assembler.cc
new file mode 100644
index 0000000..09d513f
--- /dev/null
+++ b/src/compiler/fast-accessor-assembler.cc
@@ -0,0 +1,220 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/fast-accessor-assembler.h"
+
+#include "src/base/logging.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/verifier.h"
+#include "src/handles-inl.h"
+#include "src/objects.h" // For FAA::GetInternalField impl.
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+FastAccessorAssembler::FastAccessorAssembler(Isolate* isolate)
+ : zone_(),
+ assembler_(new RawMachineAssembler(
+ isolate, new (zone()) Graph(zone()),
+ Linkage::GetJSCallDescriptor(&zone_, false, 1,
+ CallDescriptor::kNoFlags))),
+ state_(kBuilding) {}
+
+
+FastAccessorAssembler::~FastAccessorAssembler() {}
+
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::IntegerConstant(
+ int const_value) {
+ CHECK_EQ(kBuilding, state_);
+ return FromRaw(assembler_->NumberConstant(const_value));
+}
+
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::GetReceiver() {
+ CHECK_EQ(kBuilding, state_);
+
+ // For JS call descriptor, the receiver is parameter 0. If we use other
+ // call descriptors, this may or may not hold. So let's check.
+ CHECK(assembler_->call_descriptor()->IsJSFunctionCall());
+ return FromRaw(assembler_->Parameter(0));
+}
+
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
+ ValueId value, int field_no) {
+ CHECK_EQ(kBuilding, state_);
+ // Determine the 'value' object's instance type.
+ Node* object_map =
+ assembler_->Load(MachineType::Pointer(), FromId(value),
+ assembler_->IntPtrConstant(
+ Internals::kHeapObjectMapOffset - kHeapObjectTag));
+ Node* instance_type = assembler_->WordAnd(
+ assembler_->Load(
+ MachineType::Uint16(), object_map,
+ assembler_->IntPtrConstant(
+ Internals::kMapInstanceTypeAndBitFieldOffset - kHeapObjectTag)),
+ assembler_->IntPtrConstant(0xff));
+
+ // Check whether we have a proper JSObject.
+ RawMachineLabel is_jsobject, is_not_jsobject, merge;
+ assembler_->Branch(
+ assembler_->WordEqual(
+ instance_type, assembler_->IntPtrConstant(Internals::kJSObjectType)),
+ &is_jsobject, &is_not_jsobject);
+
+ // JSObject? Then load the internal field field_no.
+ assembler_->Bind(&is_jsobject);
+ Node* internal_field = assembler_->Load(
+ MachineType::Pointer(), FromId(value),
+ assembler_->IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag +
+ kPointerSize * field_no));
+ assembler_->Goto(&merge);
+
+ // No JSObject? Return undefined.
+ // TODO(vogelheim): Check whether this is the appropriate action, or whether
+ // the method should take a label instead.
+ assembler_->Bind(&is_not_jsobject);
+ Node* fail_value = assembler_->UndefinedConstant();
+ assembler_->Goto(&merge);
+
+ // Return.
+ assembler_->Bind(&merge);
+ Node* phi = assembler_->Phi(MachineRepresentation::kTagged, internal_field,
+ fail_value);
+ return FromRaw(phi);
+}
+
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadValue(ValueId value,
+ int offset) {
+ CHECK_EQ(kBuilding, state_);
+ return FromRaw(assembler_->Load(MachineType::IntPtr(), FromId(value),
+ assembler_->IntPtrConstant(offset)));
+}
+
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadObject(ValueId value,
+ int offset) {
+ CHECK_EQ(kBuilding, state_);
+ return FromRaw(
+ assembler_->Load(MachineType::AnyTagged(),
+ assembler_->Load(MachineType::Pointer(), FromId(value),
+ assembler_->IntPtrConstant(offset))));
+}
+
+
+void FastAccessorAssembler::ReturnValue(ValueId value) {
+ CHECK_EQ(kBuilding, state_);
+ assembler_->Return(FromId(value));
+}
+
+
+void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value, int mask) {
+ CHECK_EQ(kBuilding, state_);
+ RawMachineLabel pass, fail;
+ assembler_->Branch(
+ assembler_->Word32Equal(
+ assembler_->Word32And(FromId(value), assembler_->Int32Constant(mask)),
+ assembler_->Int32Constant(0)),
+ &pass, &fail);
+ assembler_->Bind(&fail);
+ assembler_->Return(assembler_->NullConstant());
+ assembler_->Bind(&pass);
+}
+
+
+void FastAccessorAssembler::CheckNotZeroOrReturnNull(ValueId value) {
+ CHECK_EQ(kBuilding, state_);
+ RawMachineLabel is_null, not_null;
+ assembler_->Branch(
+ assembler_->IntPtrEqual(FromId(value), assembler_->IntPtrConstant(0)),
+ &is_null, ¬_null);
+ assembler_->Bind(&is_null);
+ assembler_->Return(assembler_->NullConstant());
+ assembler_->Bind(&not_null);
+}
+
+
+FastAccessorAssembler::LabelId FastAccessorAssembler::MakeLabel() {
+ CHECK_EQ(kBuilding, state_);
+ RawMachineLabel* label =
+ new (zone()->New(sizeof(RawMachineLabel))) RawMachineLabel;
+ return FromRaw(label);
+}
+
+
+void FastAccessorAssembler::SetLabel(LabelId label_id) {
+ CHECK_EQ(kBuilding, state_);
+ assembler_->Bind(FromId(label_id));
+}
+
+
+void FastAccessorAssembler::CheckNotZeroOrJump(ValueId value_id,
+ LabelId label_id) {
+ CHECK_EQ(kBuilding, state_);
+ RawMachineLabel pass;
+ assembler_->Branch(
+ assembler_->IntPtrEqual(FromId(value_id), assembler_->IntPtrConstant(0)),
+ &pass, FromId(label_id));
+ assembler_->Bind(&pass);
+}
+
+
+MaybeHandle<Code> FastAccessorAssembler::Build() {
+ CHECK_EQ(kBuilding, state_);
+
+ // Cleanup: We no longer need this.
+ nodes_.clear();
+ labels_.clear();
+
+ // Export the schedule and call the compiler.
+ Schedule* schedule = assembler_->Export();
+ MaybeHandle<Code> code = Pipeline::GenerateCodeForCodeStub(
+ assembler_->isolate(), assembler_->call_descriptor(), assembler_->graph(),
+ schedule, Code::STUB, "FastAccessorAssembler");
+
+ // Update state & return.
+ state_ = !code.is_null() ? kBuilt : kError;
+ return code;
+}
+
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::FromRaw(Node* node) {
+ nodes_.push_back(node);
+ ValueId value = {nodes_.size() - 1};
+ return value;
+}
+
+
+FastAccessorAssembler::LabelId FastAccessorAssembler::FromRaw(
+ RawMachineLabel* label) {
+ labels_.push_back(label);
+ LabelId label_id = {labels_.size() - 1};
+ return label_id;
+}
+
+
+Node* FastAccessorAssembler::FromId(ValueId value) const {
+ CHECK_LT(value.value_id, nodes_.size());
+ CHECK_NOT_NULL(nodes_.at(value.value_id));
+ return nodes_.at(value.value_id);
+}
+
+
+RawMachineLabel* FastAccessorAssembler::FromId(LabelId label) const {
+ CHECK_LT(label.label_id, labels_.size());
+ CHECK_NOT_NULL(labels_.at(label.label_id));
+ return labels_.at(label.label_id);
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/fast-accessor-assembler.h b/src/compiler/fast-accessor-assembler.h
new file mode 100644
index 0000000..a9df3f0
--- /dev/null
+++ b/src/compiler/fast-accessor-assembler.h
@@ -0,0 +1,106 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
+#define V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
+
+#include <stdint.h>
+#include <vector>
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "include/v8-experimental.h"
+#include "src/base/macros.h"
+#include "src/base/smart-pointers.h"
+#include "src/handles.h"
+
+
+namespace v8 {
+namespace internal {
+
+class Code;
+class Isolate;
+class Zone;
+
+namespace compiler {
+
+class Node;
+class RawMachineAssembler;
+class RawMachineLabel;
+
+
+// This interface "exports" an aggregated subset of RawMachineAssembler, for
+// use by the API to implement Fast DOM Accessors.
+//
+// This interface is made for this single purpose only and does not attempt
+// to implement a general purpose solution. If you need one, please look at
+// RawMachineAssembler instead.
+//
+// The life cycle of a FastAccessorAssembler has two phases:
+// - After creating the instance, you can call an arbitrary sequence of
+// builder functions to build the desired function.
+// - When done, you can Build() the accessor and query for the build results.
+//
+// You cannot call any result getters before Build() has been called and
+// succeeded, and you cannot call any builder functions after Build() has been
+// called.
+class FastAccessorAssembler {
+ public:
+ typedef v8::experimental::FastAccessorBuilder::ValueId ValueId;
+ typedef v8::experimental::FastAccessorBuilder::LabelId LabelId;
+
+ explicit FastAccessorAssembler(Isolate* isolate);
+ ~FastAccessorAssembler();
+
+ // Builder / assembler functions:
+ ValueId IntegerConstant(int int_constant);
+ ValueId GetReceiver();
+ ValueId LoadInternalField(ValueId value_id, int field_no);
+ ValueId LoadValue(ValueId value_id, int offset);
+ ValueId LoadObject(ValueId value_id, int offset);
+
+ // Builder / assembler functions for control flow.
+ void ReturnValue(ValueId value_id);
+ void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
+ void CheckNotZeroOrReturnNull(ValueId value_id);
+
+ // TODO(vogelheim): Implement a C++ callback.
+ // void CheckNotNullOrCallback(ValueId value_id, ..c++-callback type...,
+ // ValueId arg1, ValueId arg2, ...);
+
+ LabelId MakeLabel();
+ void SetLabel(LabelId label_id);
+ void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
+
+ // Assemble the code.
+ MaybeHandle<Code> Build();
+
+ private:
+ ValueId FromRaw(Node* node);
+ LabelId FromRaw(RawMachineLabel* label);
+ Node* FromId(ValueId value) const;
+ RawMachineLabel* FromId(LabelId value) const;
+
+ Zone* zone() { return &zone_; }
+
+ Zone zone_;
+ base::SmartPointer<RawMachineAssembler> assembler_;
+
+ // To prevent exposing the RMA internals to the outside world, we'll map
+ // Node and Label pointers to integers wrapped in ValueId and LabelId
+ // instances. These vectors maintain this mapping.
+ std::vector<Node*> nodes_;
+ std::vector<RawMachineLabel*> labels_;
+
+ // Remember the current state for easy error checking. (We prefer to be
+ // strict, as this class will be exposed through the API.)
+ enum { kBuilding, kBuilt, kError } state_;
+
+ DISALLOW_COPY_AND_ASSIGN(FastAccessorAssembler);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
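The header above is easiest to understand from a caller's perspective. The sketch below is an editorial illustration, not part of this commit: it builds a trivial accessor that returns internal field 0 of the receiver, using only the builder functions declared above.

  #include "src/compiler/fast-accessor-assembler.h"

  // Editorial sketch: build and compile a trivial fast accessor. Assumes a
  // live Isolate*; all other names come from the header above.
  v8::internal::MaybeHandle<v8::internal::Code> BuildFieldAccessor(
      v8::internal::Isolate* isolate) {
    using v8::internal::compiler::FastAccessorAssembler;
    FastAccessorAssembler faa(isolate);
    FastAccessorAssembler::ValueId receiver = faa.GetReceiver();
    // Yields undefined when the receiver is not a JSObject (see the .cc file).
    FastAccessorAssembler::ValueId field = faa.LoadInternalField(receiver, 0);
    faa.ReturnValue(field);
    return faa.Build();  // transitions kBuilding -> kBuilt (or kError)
  }

Once Build() has been called, no further builder functions may be invoked; a fresh FastAccessorAssembler is needed per accessor.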
diff --git a/src/compiler/frame-elider.cc b/src/compiler/frame-elider.cc
new file mode 100644
index 0000000..7c3f9b2
--- /dev/null
+++ b/src/compiler/frame-elider.cc
@@ -0,0 +1,131 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/adapters.h"
+#include "src/compiler/frame-elider.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+FrameElider::FrameElider(InstructionSequence* code) : code_(code) {}
+
+void FrameElider::Run() {
+ MarkBlocks();
+ PropagateMarks();
+ MarkDeConstruction();
+}
+
+
+void FrameElider::MarkBlocks() {
+ for (auto block : instruction_blocks()) {
+ if (block->needs_frame()) continue;
+ for (auto i = block->code_start(); i < block->code_end(); ++i) {
+ if (InstructionAt(i)->IsCall() ||
+ InstructionAt(i)->opcode() == ArchOpcode::kArchDeoptimize) {
+ block->mark_needs_frame();
+ break;
+ }
+ }
+ }
+}
+
+
+void FrameElider::PropagateMarks() {
+ while (PropagateInOrder() && PropagateReversed()) {
+ }
+}
+
+
+void FrameElider::MarkDeConstruction() {
+ for (auto block : instruction_blocks()) {
+ if (block->needs_frame()) {
+ // Special case: The start block needs a frame.
+ if (block->predecessors().empty()) {
+ block->mark_must_construct_frame();
+ }
+ // Find "frame -> no frame" transitions, inserting frame
+ // deconstructions.
+ for (auto succ : block->successors()) {
+ if (!InstructionBlockAt(succ)->needs_frame()) {
+ DCHECK_EQ(1U, block->SuccessorCount());
+ block->mark_must_deconstruct_frame();
+ }
+ }
+ } else {
+ // Find "no frame -> frame" transitions, inserting frame constructions.
+ for (auto succ : block->successors()) {
+ if (InstructionBlockAt(succ)->needs_frame()) {
+ DCHECK_NE(1U, block->SuccessorCount());
+ InstructionBlockAt(succ)->mark_must_construct_frame();
+ }
+ }
+ }
+ }
+}
+
+
+bool FrameElider::PropagateInOrder() {
+ bool changed = false;
+ for (auto block : instruction_blocks()) {
+ changed |= PropagateIntoBlock(block);
+ }
+ return changed;
+}
+
+
+bool FrameElider::PropagateReversed() {
+ bool changed = false;
+ for (auto block : base::Reversed(instruction_blocks())) {
+ changed |= PropagateIntoBlock(block);
+ }
+ return changed;
+}
+
+
+bool FrameElider::PropagateIntoBlock(InstructionBlock* block) {
+ // Already marked, nothing to do...
+ if (block->needs_frame()) return false;
+
+ // Never mark the dummy end node, otherwise we might incorrectly decide to
+ // put frame deconstruction code there later.
+ if (block->successors().empty()) return false;
+
+ // Propagate towards the end ("downwards") if there is a predecessor needing
+ // a frame, but don't "bleed" from deferred code to non-deferred code.
+ for (auto pred : block->predecessors()) {
+ if (InstructionBlockAt(pred)->needs_frame() &&
+ (!InstructionBlockAt(pred)->IsDeferred() || block->IsDeferred())) {
+ block->mark_needs_frame();
+ return true;
+ }
+ }
+
+ // Propagate towards start ("upwards") if there are successors and all of
+ // them need a frame.
+ for (auto succ : block->successors()) {
+ if (!InstructionBlockAt(succ)->needs_frame()) return false;
+ }
+ block->mark_needs_frame();
+ return true;
+}
+
+
+const InstructionBlocks& FrameElider::instruction_blocks() const {
+ return code_->instruction_blocks();
+}
+
+
+InstructionBlock* FrameElider::InstructionBlockAt(RpoNumber rpo_number) const {
+ return code_->InstructionBlockAt(rpo_number);
+}
+
+
+Instruction* FrameElider::InstructionAt(int index) const {
+ return code_->InstructionAt(index);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
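The three passes above form a fixed-point computation over the instruction blocks. As an editorial aside, the same propagation logic can be sketched over a toy block type; the Block struct below is an assumption for illustration, not V8's InstructionBlock. Unlike the source's `while (PropagateInOrder() && PropagateReversed())` loop, this variant conservatively iterates until neither direction changes anything.

  #include <vector>

  // Editorial toy model of FrameElider's propagation: a block gets a frame if
  // some predecessor has one (unless that would bleed from deferred into
  // non-deferred code), or if it has successors and all of them need one.
  struct Block {
    std::vector<int> preds, succs;
    bool deferred = false;
    bool needs_frame = false;
  };

  static bool PropagateInto(std::vector<Block>& blocks, int i) {
    Block& b = blocks[i];
    if (b.needs_frame) return false;    // already marked, nothing to do
    if (b.succs.empty()) return false;  // never mark the dummy end block
    for (int p : b.preds) {             // "downwards" propagation
      if (blocks[p].needs_frame && (!blocks[p].deferred || b.deferred)) {
        b.needs_frame = true;
        return true;
      }
    }
    for (int s : b.succs) {             // "upwards" propagation
      if (!blocks[s].needs_frame) return false;
    }
    b.needs_frame = true;
    return true;
  }

  void PropagateToFixedPoint(std::vector<Block>& blocks) {
    bool changed = true;
    while (changed) {
      changed = false;
      for (int i = 0; i < static_cast<int>(blocks.size()); ++i)
        changed |= PropagateInto(blocks, i);
      for (int i = static_cast<int>(blocks.size()) - 1; i >= 0; --i)
        changed |= PropagateInto(blocks, i);
    }
  }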
diff --git a/src/compiler/frame-elider.h b/src/compiler/frame-elider.h
new file mode 100644
index 0000000..7d31619
--- /dev/null
+++ b/src/compiler/frame-elider.h
@@ -0,0 +1,41 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FRAME_ELIDER_H_
+#define V8_COMPILER_FRAME_ELIDER_H_
+
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Determine which instruction blocks need a frame and where frames must be
+// constructed/deconstructed.
+class FrameElider {
+ public:
+ explicit FrameElider(InstructionSequence* code);
+ void Run();
+
+
+ private:
+ void MarkBlocks();
+ void PropagateMarks();
+ void MarkDeConstruction();
+ bool PropagateInOrder();
+ bool PropagateReversed();
+ bool PropagateIntoBlock(InstructionBlock* block);
+ const InstructionBlocks& instruction_blocks() const;
+ InstructionBlock* InstructionBlockAt(RpoNumber rpo_number) const;
+ Instruction* InstructionAt(int index) const;
+
+ InstructionSequence* const code_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_FRAME_ELIDER_H_
diff --git a/src/compiler/frame-states.cc b/src/compiler/frame-states.cc
new file mode 100644
index 0000000..387d6a9
--- /dev/null
+++ b/src/compiler/frame-states.cc
@@ -0,0 +1,81 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/frame-states.h"
+
+#include "src/base/functional.h"
+#include "src/handles-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+size_t hash_value(OutputFrameStateCombine const& sc) {
+ return base::hash_combine(sc.kind_, sc.parameter_);
+}
+
+
+std::ostream& operator<<(std::ostream& os, OutputFrameStateCombine const& sc) {
+ switch (sc.kind_) {
+ case OutputFrameStateCombine::kPushOutput:
+ if (sc.parameter_ == 0) return os << "Ignore";
+ return os << "Push(" << sc.parameter_ << ")";
+ case OutputFrameStateCombine::kPokeAt:
+ return os << "PokeAt(" << sc.parameter_ << ")";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+bool operator==(FrameStateInfo const& lhs, FrameStateInfo const& rhs) {
+ return lhs.type() == rhs.type() && lhs.bailout_id() == rhs.bailout_id() &&
+ lhs.state_combine() == rhs.state_combine() &&
+ lhs.function_info() == rhs.function_info();
+}
+
+
+bool operator!=(FrameStateInfo const& lhs, FrameStateInfo const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(FrameStateInfo const& info) {
+ return base::hash_combine(static_cast<int>(info.type()), info.bailout_id(),
+ info.state_combine());
+}
+
+
+std::ostream& operator<<(std::ostream& os, FrameStateType type) {
+ switch (type) {
+ case FrameStateType::kJavaScriptFunction:
+ os << "JS_FRAME";
+ break;
+ case FrameStateType::kInterpretedFunction:
+ os << "INTERPRETED_FRAME";
+ break;
+ case FrameStateType::kArgumentsAdaptor:
+ os << "ARGUMENTS_ADAPTOR";
+ break;
+ case FrameStateType::kConstructStub:
+ os << "CONSTRUCT_STUB";
+ break;
+ }
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, FrameStateInfo const& info) {
+ os << info.type() << ", " << info.bailout_id() << ", "
+ << info.state_combine();
+ Handle<SharedFunctionInfo> shared_info;
+ if (info.shared_info().ToHandle(&shared_info)) {
+ os << ", " << Brief(*shared_info);
+ }
+ return os;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/frame-states.h b/src/compiler/frame-states.h
new file mode 100644
index 0000000..ddb55c3
--- /dev/null
+++ b/src/compiler/frame-states.h
@@ -0,0 +1,177 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FRAME_STATES_H_
+#define V8_COMPILER_FRAME_STATES_H_
+
+#include "src/handles.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class SharedFunctionInfo;
+
+namespace compiler {
+
+// Flag that describes how to combine the current environment with
+// the output of a node to obtain a frame state for lazy bailout.
+class OutputFrameStateCombine {
+ public:
+ enum Kind {
+ kPushOutput, // Push the output on the expression stack.
+ kPokeAt // Poke at the given environment location,
+ // counting from the top of the stack.
+ };
+
+ static OutputFrameStateCombine Ignore() {
+ return OutputFrameStateCombine(kPushOutput, 0);
+ }
+ static OutputFrameStateCombine Push(size_t count = 1) {
+ return OutputFrameStateCombine(kPushOutput, count);
+ }
+ static OutputFrameStateCombine PokeAt(size_t index) {
+ return OutputFrameStateCombine(kPokeAt, index);
+ }
+
+ Kind kind() const { return kind_; }
+ size_t GetPushCount() const {
+ DCHECK_EQ(kPushOutput, kind());
+ return parameter_;
+ }
+ size_t GetOffsetToPokeAt() const {
+ DCHECK_EQ(kPokeAt, kind());
+ return parameter_;
+ }
+
+ bool IsOutputIgnored() const {
+ return kind_ == kPushOutput && parameter_ == 0;
+ }
+
+ size_t ConsumedOutputCount() const {
+ return kind_ == kPushOutput ? GetPushCount() : 1;
+ }
+
+ bool operator==(OutputFrameStateCombine const& other) const {
+ return kind_ == other.kind_ && parameter_ == other.parameter_;
+ }
+ bool operator!=(OutputFrameStateCombine const& other) const {
+ return !(*this == other);
+ }
+
+ friend size_t hash_value(OutputFrameStateCombine const&);
+ friend std::ostream& operator<<(std::ostream&,
+ OutputFrameStateCombine const&);
+
+ private:
+ OutputFrameStateCombine(Kind kind, size_t parameter)
+ : kind_(kind), parameter_(parameter) {}
+
+ Kind const kind_;
+ size_t const parameter_;
+};
+
+
+// The type of stack frame that a FrameState node represents.
+enum class FrameStateType {
+ kJavaScriptFunction, // Represents an unoptimized JavaScriptFrame.
+ kInterpretedFunction, // Represents an InterpretedFrame.
+ kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
+ kConstructStub // Represents a ConstructStubFrame.
+};
+
+
+enum ContextCallingMode {
+ CALL_MAINTAINS_NATIVE_CONTEXT,
+ CALL_CHANGES_NATIVE_CONTEXT
+};
+
+
+class FrameStateFunctionInfo {
+ public:
+ FrameStateFunctionInfo(FrameStateType type, int parameter_count,
+ int local_count,
+ Handle<SharedFunctionInfo> shared_info,
+ ContextCallingMode context_calling_mode)
+ : type_(type),
+ parameter_count_(parameter_count),
+ local_count_(local_count),
+ shared_info_(shared_info),
+ context_calling_mode_(context_calling_mode) {}
+
+ int local_count() const { return local_count_; }
+ int parameter_count() const { return parameter_count_; }
+ Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+ FrameStateType type() const { return type_; }
+ ContextCallingMode context_calling_mode() const {
+ return context_calling_mode_;
+ }
+
+ static bool IsJSFunctionType(FrameStateType type) {
+ return type == FrameStateType::kJavaScriptFunction ||
+ type == FrameStateType::kInterpretedFunction;
+ }
+
+ private:
+ FrameStateType const type_;
+ int const parameter_count_;
+ int const local_count_;
+ Handle<SharedFunctionInfo> const shared_info_;
+ ContextCallingMode context_calling_mode_;
+};
+
+
+class FrameStateInfo final {
+ public:
+ FrameStateInfo(BailoutId bailout_id, OutputFrameStateCombine state_combine,
+ const FrameStateFunctionInfo* info)
+ : bailout_id_(bailout_id),
+ frame_state_combine_(state_combine),
+ info_(info) {}
+
+ FrameStateType type() const {
+ return info_ == nullptr ? FrameStateType::kJavaScriptFunction
+ : info_->type();
+ }
+ BailoutId bailout_id() const { return bailout_id_; }
+ OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
+ MaybeHandle<SharedFunctionInfo> shared_info() const {
+ return info_ == nullptr ? MaybeHandle<SharedFunctionInfo>()
+ : info_->shared_info();
+ }
+ int parameter_count() const {
+ return info_ == nullptr ? 0 : info_->parameter_count();
+ }
+ int local_count() const {
+ return info_ == nullptr ? 0 : info_->local_count();
+ }
+ const FrameStateFunctionInfo* function_info() const { return info_; }
+
+ private:
+ BailoutId const bailout_id_;
+ OutputFrameStateCombine const frame_state_combine_;
+ const FrameStateFunctionInfo* const info_;
+};
+
+bool operator==(FrameStateInfo const&, FrameStateInfo const&);
+bool operator!=(FrameStateInfo const&, FrameStateInfo const&);
+
+size_t hash_value(FrameStateInfo const&);
+
+std::ostream& operator<<(std::ostream&, FrameStateInfo const&);
+
+static const int kFrameStateParametersInput = 0;
+static const int kFrameStateLocalsInput = 1;
+static const int kFrameStateStackInput = 2;
+static const int kFrameStateContextInput = 3;
+static const int kFrameStateFunctionInput = 4;
+static const int kFrameStateOuterStateInput = 5;
+static const int kFrameStateInputCount = kFrameStateOuterStateInput + 1;
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_FRAME_STATES_H_
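Two details of this header are worth making concrete. First, Ignore() is encoded as Push with a count of zero, which drives both IsOutputIgnored() and ConsumedOutputCount(). Second, the kFrameState*Input constants give the fixed input layout of a FrameState node, so the context input, for instance, is reached as node->InputAt(kFrameStateContextInput). A small editorial sketch, assuming only the declarations above:

  #include <cassert>
  #include "src/compiler/frame-states.h"

  using v8::internal::compiler::OutputFrameStateCombine;

  // All assertions follow directly from the factory methods and accessors
  // declared above; nothing here is new behavior.
  void CombineExamples() {
    OutputFrameStateCombine ignore = OutputFrameStateCombine::Ignore();
    assert(ignore.IsOutputIgnored());           // kPushOutput, parameter 0
    assert(ignore.ConsumedOutputCount() == 0);  // pushes nothing

    OutputFrameStateCombine poke = OutputFrameStateCombine::PokeAt(3);
    assert(poke.GetOffsetToPokeAt() == 3);      // 3 slots below the stack top
    assert(poke.ConsumedOutputCount() == 1);    // a poke consumes one value
  }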
diff --git a/src/compiler/frame.cc b/src/compiler/frame.cc
new file mode 100644
index 0000000..b08030b
--- /dev/null
+++ b/src/compiler/frame.cc
@@ -0,0 +1,52 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/frame.h"
+
+#include "src/compiler/linkage.h"
+#include "src/compiler/register-allocator.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Frame::Frame(int fixed_frame_size_in_slots, const CallDescriptor* descriptor)
+ : needs_frame_((descriptor != nullptr) &&
+ descriptor->RequiresFrameAsIncoming()),
+ frame_slot_count_(fixed_frame_size_in_slots),
+ callee_saved_slot_count_(0),
+ spill_slot_count_(0),
+ allocated_registers_(nullptr),
+ allocated_double_registers_(nullptr) {}
+
+
+void FrameAccessState::SetFrameAccessToDefault() {
+ if (frame()->needs_frame() && !FLAG_turbo_sp_frame_access) {
+ SetFrameAccessToFP();
+ } else {
+ SetFrameAccessToSP();
+ }
+}
+
+
+FrameOffset FrameAccessState::GetFrameOffset(int spill_slot) const {
+ const int offset =
+ (StandardFrameConstants::kFixedSlotCountAboveFp - spill_slot - 1) *
+ kPointerSize;
+ if (access_frame_with_fp()) {
+ DCHECK(frame()->needs_frame());
+ return FrameOffset::FromFramePointer(offset);
+ } else {
+ // No frame. Retrieve all parameters relative to stack pointer.
+ int sp_offset =
+ offset + ((frame()->GetSpToFpSlotCount() + sp_delta()) * kPointerSize);
+ return FrameOffset::FromStackPointer(sp_offset);
+ }
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
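GetFrameOffset above is easiest to check with concrete numbers. The sketch below is editorial; kPointerSize == 8 and kFixedSlotCountAboveFp == 2 are assumptions for a 64-bit target, and the helper names are the editor's, not the commit's. It reproduces both addressing modes:

  // FP-relative offset of a spill slot, mirroring GetFrameOffset().
  constexpr int kPointerSize = 8;            // assumed 64-bit target
  constexpr int kFixedSlotCountAboveFp = 2;  // return address + saved fp

  constexpr int FpOffset(int spill_slot) {
    return (kFixedSlotCountAboveFp - spill_slot - 1) * kPointerSize;
  }

  // SP-relative offset: rebase by the sp-to-fp distance in slots plus any
  // outstanding pushes tracked in sp_delta.
  constexpr int SpOffset(int spill_slot, int sp_to_fp_slots, int sp_delta) {
    return FpOffset(spill_slot) + (sp_to_fp_slots + sp_delta) * kPointerSize;
  }

  static_assert(FpOffset(1) == 0, "slot 1 (saved fp) sits at the frame ptr");
  static_assert(FpOffset(4) == -24, "slot 4 (first JS spill) lies below fp");
  // With a 6-slot frame, sp is 4 slots below fp, so the same spill slot is
  // 8 bytes above the stack pointer:
  static_assert(SpOffset(4, 4, 0) == 8, "rebased against sp");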
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index f99d7bd..72f756b 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -5,68 +5,187 @@
#ifndef V8_COMPILER_FRAME_H_
#define V8_COMPILER_FRAME_H_
-#include "src/v8.h"
-
#include "src/bit-vector.h"
+#include "src/frames.h"
namespace v8 {
namespace internal {
namespace compiler {
-// Collects the spill slot requirements and the allocated general and double
-// registers for a compiled function. Frames are usually populated by the
-// register allocator and are used by Linkage to generate code for the prologue
-// and epilogue to compiled code.
+class CallDescriptor;
+
+// Collects the spill slot and other frame slot requirements for a compiled
+// function. Frames are usually populated by the register allocator and are used
+// by Linkage to generate code for the prologue and epilogue to compiled
+// code. Frame objects must be considered immutable once they've been
+// instantiated and the basic information about the frame has been collected
+// into them. Mutable state associated with the frame is stored separately in
+// FrameAccessState.
+//
+// Frames are divided up into three regions.
+// - The first is the fixed header, which always has a constant size that can
+// be predicted before code generation begins, depending only on the type of
+// code being generated.
+// - The second is the region for spill slots, which is immediately below the
+// fixed header and grows as the register allocator needs to spill to the
+// stack and asks the frame for more space.
+// - The third region, which contains the callee-saved registers, must be
+// reserved after register allocation, since its size can only be precisely
+// determined once the number of used callee-saved registers is certain.
+//
+// Every pointer in a frame has a slot id. On 32-bit platforms, doubles consume
+// two slots.
+//
+// Stack slot indices >= 0 access the callee stack with slot 0 corresponding to
+// the callee's saved return address and 1 corresponding to the saved frame
+// pointer. Some frames have additional information stored in the fixed header,
+// for example JSFunctions store the function context and marker in the fixed
+// header, with slot index 2 corresponding to the current function context and 3
+// corresponding to the frame marker/JSFunction. The frame region immediately
+// below the fixed header contains spill slots starting at 4 for JSFunctions.
+// The callee-saved frame region below that starts at 4+spill_slot_count_.
+// Callee stack slots corresponding to parameters are accessible through
+// negative slot ids.
+//
+// Every slot of a caller or callee frame is accessible by the register
+// allocator and gap resolver with a SpillSlotOperand containing its
+// corresponding slot id.
+//
+// Below is an example JSFunction frame with slot ids, frame regions and contents:
+//
+// slot JS frame
+// +-----------------+--------------------------------
+// -n-1 | parameter 0 | ^
+// |- - - - - - - - -| |
+// -n | | Caller
+// ... | ... | frame slots
+// -2 | parameter n-1 | (slot < 0)
+// |- - - - - - - - -| |
+// -1 | parameter n | v
+// -----+-----------------+--------------------------------
+// 0 | return addr | ^ ^
+// |- - - - - - - - -| | |
+// 1 | saved frame ptr | Fixed |
+// |- - - - - - - - -| Header <-- frame ptr |
+// 2 | Context | | |
+// |- - - - - - - - -| | |
+// 3 |JSFunction/Marker| v |
+// +-----------------+---- |
+// 4 | spill 1 | ^ Callee
+// |- - - - - - - - -| | frame slots
+// ... | ... | Spill slots (slot >= 0)
+// |- - - - - - - - -| | |
+// m+4 | spill m | v |
+// +-----------------+---- |
+// m+5 | callee-saved 1 | ^ |
+// |- - - - - - - - -| | |
+// | ... | Callee-saved |
+// |- - - - - - - - -| | |
+// m+r+4 | callee-saved r | v v
+// -----+-----------------+----- <-- stack ptr -------------
+//
class Frame : public ZoneObject {
public:
- Frame()
- : register_save_area_size_(0),
- spill_slot_count_(0),
- double_spill_slot_count_(0),
- allocated_registers_(NULL),
- allocated_double_registers_(NULL) {}
+ explicit Frame(int fixed_frame_size_in_slots,
+ const CallDescriptor* descriptor);
- inline int GetSpillSlotCount() { return spill_slot_count_; }
- inline int GetDoubleSpillSlotCount() { return double_spill_slot_count_; }
+ static int FPOffsetToSlot(int frame_offset) {
+ return StandardFrameConstants::kFixedSlotCountAboveFp - 1 -
+ frame_offset / kPointerSize;
+ }
+
+ static int SlotToFPOffset(int slot) {
+ return (StandardFrameConstants::kFixedSlotCountAboveFp - 1 - slot) *
+ kPointerSize;
+ }
+
+ inline bool needs_frame() const { return needs_frame_; }
+ inline void MarkNeedsFrame() { needs_frame_ = true; }
+
+ inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
+
+ inline int GetSpToFpSlotCount() const {
+ return GetTotalFrameSlotCount() -
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ }
+ inline int GetSavedCalleeRegisterSlotCount() const {
+ return callee_saved_slot_count_;
+ }
+ inline int GetSpillSlotCount() const { return spill_slot_count_; }
+
+ inline void SetElidedFrameSizeInSlots(int slots) {
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ DCHECK_EQ(0, spill_slot_count_);
+ frame_slot_count_ = slots;
+ }
void SetAllocatedRegisters(BitVector* regs) {
- DCHECK(allocated_registers_ == NULL);
+ DCHECK(allocated_registers_ == nullptr);
allocated_registers_ = regs;
}
void SetAllocatedDoubleRegisters(BitVector* regs) {
- DCHECK(allocated_double_registers_ == NULL);
+ DCHECK(allocated_double_registers_ == nullptr);
allocated_double_registers_ = regs;
}
- bool DidAllocateDoubleRegisters() {
+ bool DidAllocateDoubleRegisters() const {
return !allocated_double_registers_->IsEmpty();
}
- void SetRegisterSaveAreaSize(int size) {
- DCHECK(IsAligned(size, kPointerSize));
- register_save_area_size_ = size;
+ int AlignSavedCalleeRegisterSlots() {
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ needs_frame_ = true;
+ int delta = frame_slot_count_ & 1;
+ frame_slot_count_ += delta;
+ return delta;
}
- int GetRegisterSaveAreaSize() { return register_save_area_size_; }
+ void AllocateSavedCalleeRegisterSlots(int count) {
+ needs_frame_ = true;
+ frame_slot_count_ += count;
+ callee_saved_slot_count_ += count;
+ }
- int AllocateSpillSlot(bool is_double) {
- // If 32-bit, skip one if the new slot is a double.
- if (is_double) {
- if (kDoubleSize > kPointerSize) {
- DCHECK(kDoubleSize == kPointerSize * 2);
- spill_slot_count_++;
- spill_slot_count_ |= 1;
- }
- double_spill_slot_count_++;
+ int AllocateSpillSlot(int width) {
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ needs_frame_ = true;
+ int frame_slot_count_before = frame_slot_count_;
+ int slot = AllocateAlignedFrameSlot(width);
+ spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
+ return slot;
+ }
+
+ int ReserveSpillSlots(size_t slot_count) {
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ DCHECK_EQ(0, spill_slot_count_);
+ needs_frame_ = true;
+ spill_slot_count_ += static_cast<int>(slot_count);
+ frame_slot_count_ += static_cast<int>(slot_count);
+ return frame_slot_count_ - 1;
+ }
+
+ static const int kContextSlot = 2 + StandardFrameConstants::kCPSlotCount;
+ static const int kJSFunctionSlot = 3 + StandardFrameConstants::kCPSlotCount;
+
+ private:
+ int AllocateAlignedFrameSlot(int width) {
+ DCHECK(width == 4 || width == 8);
+ // Skip one slot if necessary.
+ if (width > kPointerSize) {
+ DCHECK(width == kPointerSize * 2);
+ frame_slot_count_++;
+ frame_slot_count_ |= 1;
}
- return spill_slot_count_++;
+ return frame_slot_count_++;
}
private:
- int register_save_area_size_;
+ bool needs_frame_;
+ int frame_slot_count_;
+ int callee_saved_slot_count_;
int spill_slot_count_;
- int double_spill_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
@@ -99,8 +218,40 @@
static const int kFromSp = 1;
static const int kFromFp = 0;
};
-}
-}
-} // namespace v8::internal::compiler
+
+// Encapsulates the mutable state maintained during code generation about the
+// current function's frame.
+class FrameAccessState : public ZoneObject {
+ public:
+ explicit FrameAccessState(Frame* const frame)
+ : frame_(frame), access_frame_with_fp_(false), sp_delta_(0) {
+ SetFrameAccessToDefault();
+ }
+
+ Frame* frame() const { return frame_; }
+
+ int sp_delta() const { return sp_delta_; }
+ void ClearSPDelta() { sp_delta_ = 0; }
+ void IncreaseSPDelta(int amount) { sp_delta_ += amount; }
+
+ bool access_frame_with_fp() const { return access_frame_with_fp_; }
+ void SetFrameAccessToDefault();
+ void SetFrameAccessToFP() { access_frame_with_fp_ = true; }
+ void SetFrameAccessToSP() { access_frame_with_fp_ = false; }
+
+ // Get the frame offset for a given spill slot. The location depends on the
+ // calling convention and the specific frame layout, and may thus be
+ // architecture-specific. Negative spill slots indicate arguments on the
+ // caller's frame.
+ FrameOffset GetFrameOffset(int spill_slot) const;
+
+ private:
+ Frame* const frame_;
+ bool access_frame_with_fp_;
+ int sp_delta_;
+};
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_FRAME_H_
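The two static helpers FPOffsetToSlot and SlotToFPOffset added above are inverses of each other. A compile-time sanity check, under the same assumed constants as in the frame.cc example (editorial sketch, not part of the commit):

  constexpr int kPointerSize = 8;            // assumed 64-bit target
  constexpr int kFixedSlotCountAboveFp = 2;  // return address + saved fp

  constexpr int FPOffsetToSlot(int frame_offset) {
    return kFixedSlotCountAboveFp - 1 - frame_offset / kPointerSize;
  }
  constexpr int SlotToFPOffset(int slot) {
    return (kFixedSlotCountAboveFp - 1 - slot) * kPointerSize;
  }

  static_assert(SlotToFPOffset(0) == 8, "slot 0: return address, above fp");
  static_assert(SlotToFPOffset(2) == -8, "slot 2: context, below fp");
  static_assert(FPOffsetToSlot(SlotToFPOffset(3)) == 3, "round trip");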
diff --git a/src/compiler/gap-resolver.cc b/src/compiler/gap-resolver.cc
index f369607..4107b0f 100644
--- a/src/compiler/gap-resolver.cc
+++ b/src/compiler/gap-resolver.cc
@@ -12,49 +12,30 @@
namespace internal {
namespace compiler {
-typedef ZoneList<MoveOperands>::iterator op_iterator;
+namespace {
-#ifdef ENABLE_SLOW_DCHECKS
-// TODO(svenpanne) Brush up InstructionOperand with comparison?
-struct InstructionOperandComparator {
- bool operator()(const InstructionOperand* x,
- const InstructionOperand* y) const {
- return (x->kind() < y->kind()) ||
- (x->kind() == y->kind() && x->index() < y->index());
- }
-};
-#endif
-
-// No operand should be the destination for more than one move.
-static void VerifyMovesAreInjective(ZoneList<MoveOperands>* moves) {
-#ifdef ENABLE_SLOW_DCHECKS
- std::set<InstructionOperand*, InstructionOperandComparator> seen;
- for (op_iterator i = moves->begin(); i != moves->end(); ++i) {
- SLOW_DCHECK(seen.find(i->destination()) == seen.end());
- seen.insert(i->destination());
- }
-#endif
+inline bool Blocks(MoveOperands* move, InstructionOperand destination) {
+ return move->Blocks(destination);
}
-void GapResolver::Resolve(ParallelMove* parallel_move) const {
- ZoneList<MoveOperands>* moves = parallel_move->move_operands();
- // TODO(svenpanne) Use the member version of remove_if when we use real lists.
- op_iterator end =
- std::remove_if(moves->begin(), moves->end(),
- std::mem_fun_ref(&MoveOperands::IsRedundant));
- moves->Rewind(static_cast<int>(end - moves->begin()));
+inline bool IsRedundant(MoveOperands* move) { return move->IsRedundant(); }
- VerifyMovesAreInjective(moves);
+} // namespace
- for (op_iterator move = moves->begin(); move != moves->end(); ++move) {
- if (!move->IsEliminated()) PerformMove(moves, &*move);
+
+void GapResolver::Resolve(ParallelMove* moves) const {
+ // Clear redundant moves.
+ auto it =
+ std::remove_if(moves->begin(), moves->end(), std::ptr_fun(IsRedundant));
+ moves->erase(it, moves->end());
+ for (auto move : *moves) {
+ if (!move->IsEliminated()) PerformMove(moves, move);
}
}
-void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
- MoveOperands* move) const {
+void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We mark a
// move as "pending" on entry to PerformMove in order to detect cycles in the
@@ -65,14 +46,14 @@
// Clear this move's destination to indicate a pending move. The actual
// destination is saved on the side.
- DCHECK_NOT_NULL(move->source()); // Or else it will look eliminated.
- InstructionOperand* destination = move->destination();
- move->set_destination(NULL);
+ DCHECK(!move->source().IsInvalid()); // Or else it will look eliminated.
+ InstructionOperand destination = move->destination();
+ move->SetPending();
// Perform a depth-first traversal of the move graph to resolve dependencies.
// Any unperformed, unpending move with a source the same as this one's
// destination blocks this one so recursively perform all such moves.
- for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
+ for (auto other : *moves) {
if (other->Blocks(destination) && !other->IsPending()) {
// Though PerformMove can change any source operand in the move graph,
// this call cannot create a blocking move via a swap (this loop does not
@@ -93,8 +74,8 @@
// This move's source may have changed due to swaps to resolve cycles and so
// it may now be the last move in the cycle. If so remove it.
- InstructionOperand* source = move->source();
- if (source->Equals(destination)) {
+ InstructionOperand source = move->source();
+ if (source.EqualsCanonicalized(destination)) {
move->Eliminate();
return;
}
@@ -102,28 +83,27 @@
// The move may be blocked on a (at most one) pending move, in which case we
// have a cycle. Search for such a blocking move and perform a swap to
// resolve it.
- op_iterator blocker = std::find_if(
- moves->begin(), moves->end(),
- std::bind2nd(std::mem_fun_ref(&MoveOperands::Blocks), destination));
+ auto blocker = std::find_if(moves->begin(), moves->end(),
+ std::bind2nd(std::ptr_fun(&Blocks), destination));
if (blocker == moves->end()) {
// The easy case: This move is not blocked.
- assembler_->AssembleMove(source, destination);
+ assembler_->AssembleMove(&source, &destination);
move->Eliminate();
return;
}
- DCHECK(blocker->IsPending());
+ DCHECK((*blocker)->IsPending());
// Ensure source is a register or both are stack slots, to limit swap cases.
- if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+ if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
std::swap(source, destination);
}
- assembler_->AssembleSwap(source, destination);
+ assembler_->AssembleSwap(&source, &destination);
move->Eliminate();
// Any unperformed (including pending) move with a source of either this
// move's source or destination needs to have their source changed to
// reflect the state of affairs after the swap.
- for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
+ for (auto other : *moves) {
if (other->Blocks(source)) {
other->set_source(destination);
} else if (other->Blocks(destination)) {
@@ -131,6 +111,6 @@
}
}
}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
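PerformMove above implements the standard parallel-move (gap) resolution: recurse on blockers, detect the at-most-one cycle via a pending marker, and break it with a swap. A self-contained toy version over integer "locations" (an editorial sketch; the Move struct below is not V8's MoveOperands) shows the same shape:

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  // Toy parallel-move resolver mirroring GapResolver::PerformMove:
  // depth-first resolution of blockers, a "pending" marker for cycle
  // detection, and a swap to break a cycle.
  struct Move {
    int src, dst;
    bool pending = false, eliminated = false;
    bool Blocks(int loc) const { return !eliminated && src == loc; }
  };

  static void PerformMove(std::vector<Move>& moves, Move& move) {
    // Mark pending so an ancestor in the recursion can detect a cycle, then
    // recursively perform every non-pending move blocking our destination.
    move.pending = true;
    for (Move& other : moves) {
      if (&other != &move && !other.pending && !other.eliminated &&
          other.Blocks(move.dst)) {
        PerformMove(moves, other);
      }
    }
    move.pending = false;
    if (move.src == move.dst) { move.eliminated = true; return; }

    // A pending move still blocked on our destination means a cycle: break
    // it with a swap instead of a plain move, then patch up other sources.
    auto blocker = std::find_if(moves.begin(), moves.end(), [&](const Move& m) {
      return m.pending && m.src == move.dst;
    });
    if (blocker == moves.end()) {
      std::printf("move %d -> %d\n", move.src, move.dst);
    } else {
      std::printf("swap %d <-> %d\n", move.src, move.dst);
      for (Move& other : moves) {
        if (other.eliminated || &other == &move) continue;
        if (other.src == move.src) other.src = move.dst;
        else if (other.src == move.dst) other.src = move.src;
      }
    }
    move.eliminated = true;
  }

  void Resolve(std::vector<Move>& moves) {
    for (Move& m : moves) if (m.src == m.dst) m.eliminated = true;  // redundant
    for (Move& m : moves) if (!m.eliminated) PerformMove(moves, m);
  }

Resolving {1 -> 2, 2 -> 1} prints a single swap; {1 -> 2, 2 -> 3} prints the moves in dependency order (2 -> 3 first).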
diff --git a/src/compiler/gap-resolver.h b/src/compiler/gap-resolver.h
index 4f4f4e4..19806f5 100644
--- a/src/compiler/gap-resolver.h
+++ b/src/compiler/gap-resolver.h
@@ -11,7 +11,7 @@
namespace internal {
namespace compiler {
-class GapResolver FINAL {
+class GapResolver final {
public:
// Interface used by the gap resolver to emit moves and swaps.
class Assembler {
@@ -34,7 +34,7 @@
private:
// Perform the given move, possibly requiring other moves to satisfy
// dependencies.
- void PerformMove(ZoneList<MoveOperands>* moves, MoveOperands* move) const;
+ void PerformMove(ParallelMove* moves, MoveOperands* move) const;
// Assembler used to emit moves and save registers.
Assembler* const assembler_;
diff --git a/src/compiler/generic-algorithm.h b/src/compiler/generic-algorithm.h
deleted file mode 100644
index 391757e..0000000
--- a/src/compiler/generic-algorithm.h
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GENERIC_ALGORITHM_H_
-#define V8_COMPILER_GENERIC_ALGORITHM_H_
-
-#include <stack>
-#include <vector>
-
-#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class Graph;
-class Node;
-
-// GenericGraphVisit allows visitation of graphs of nodes and edges in pre- and
-// post-order. Visitation uses an explicitly allocated stack rather than the
-// execution stack to avoid stack overflow.
-class GenericGraphVisit {
- public:
- // struct Visitor {
- // void Pre(Node* current);
- // void Post(Node* current);
- // void PreEdge(Node* from, int index, Node* to);
- // void PostEdge(Node* from, int index, Node* to);
- // }
- template <class Visitor>
- static void Visit(Graph* graph, Zone* zone, Node** root_begin,
- Node** root_end, Visitor* visitor) {
- typedef typename Node::InputEdges::iterator Iterator;
- typedef std::pair<Iterator, Iterator> NodeState;
- typedef std::stack<NodeState, ZoneDeque<NodeState> > NodeStateStack;
- NodeStateStack stack((ZoneDeque<NodeState>(zone)));
- BoolVector visited(graph->NodeCount(), false, zone);
- Node* current = *root_begin;
- while (true) {
- DCHECK(current != NULL);
- const int id = current->id();
- DCHECK(id >= 0);
- DCHECK(id < graph->NodeCount()); // Must be a valid id.
- bool visit = !GetVisited(&visited, id);
- if (visit) {
- visitor->Pre(current);
- SetVisited(&visited, id);
- }
- Iterator begin(visit ? current->input_edges().begin()
- : current->input_edges().end());
- Iterator end(current->input_edges().end());
- stack.push(NodeState(begin, end));
- Node* post_order_node = current;
- while (true) {
- NodeState top = stack.top();
- if (top.first == top.second) {
- if (visit) {
- visitor->Post(post_order_node);
- SetVisited(&visited, post_order_node->id());
- }
- stack.pop();
- if (stack.empty()) {
- if (++root_begin == root_end) return;
- current = *root_begin;
- break;
- }
- post_order_node = (*stack.top().first).from();
- visit = true;
- } else {
- visitor->PreEdge((*top.first).from(), (*top.first).index(),
- (*top.first).to());
- current = (*top.first).to();
- if (!GetVisited(&visited, current->id())) break;
- }
- top = stack.top();
- visitor->PostEdge((*top.first).from(), (*top.first).index(),
- (*top.first).to());
- ++stack.top().first;
- }
- }
- }
-
- template <class Visitor>
- static void Visit(Graph* graph, Zone* zone, Node* current, Visitor* visitor) {
- Node* array[] = {current};
- Visit<Visitor>(graph, zone, &array[0], &array[1], visitor);
- }
-
- struct NullNodeVisitor {
- void Pre(Node* node) {}
- void Post(Node* node) {}
- void PreEdge(Node* from, int index, Node* to) {}
- void PostEdge(Node* from, int index, Node* to) {}
- };
-
- private:
- static void SetVisited(BoolVector* visited, int id) {
- if (id >= static_cast<int>(visited->size())) {
- // Resize and set all values to unvisited.
- visited->resize((3 * id) / 2, false);
- }
- visited->at(id) = true;
- }
-
- static bool GetVisited(BoolVector* visited, int id) {
- if (id >= static_cast<int>(visited->size())) return false;
- return visited->at(id);
- }
-};
-
-typedef GenericGraphVisit::NullNodeVisitor NullNodeVisitor;
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_GENERIC_ALGORITHM_H_
diff --git a/src/compiler/graph-builder.cc b/src/compiler/graph-builder.cc
deleted file mode 100644
index 6321aaa..0000000
--- a/src/compiler/graph-builder.cc
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/graph-builder.h"
-
-#include "src/bit-vector.h"
-#include "src/compiler.h"
-#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/node.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/compiler/operator-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-StructuredGraphBuilder::StructuredGraphBuilder(Zone* local_zone, Graph* graph,
- CommonOperatorBuilder* common)
- : GraphBuilder(graph),
- common_(common),
- environment_(NULL),
- local_zone_(local_zone),
- input_buffer_size_(0),
- input_buffer_(NULL),
- current_context_(NULL),
- exit_control_(NULL) {
- EnsureInputBufferSize(kInputBufferSizeIncrement);
-}
-
-
-Node** StructuredGraphBuilder::EnsureInputBufferSize(int size) {
- if (size > input_buffer_size_) {
- size += kInputBufferSizeIncrement;
- input_buffer_ = local_zone()->NewArray<Node*>(size);
- }
- return input_buffer_;
-}
-
-
-Node* StructuredGraphBuilder::MakeNode(const Operator* op,
- int value_input_count,
- Node** value_inputs, bool incomplete) {
- DCHECK(op->ValueInputCount() == value_input_count);
-
- bool has_context = OperatorProperties::HasContextInput(op);
- bool has_framestate = OperatorProperties::HasFrameStateInput(op);
- bool has_control = op->ControlInputCount() == 1;
- bool has_effect = op->EffectInputCount() == 1;
-
- DCHECK(op->ControlInputCount() < 2);
- DCHECK(op->EffectInputCount() < 2);
-
- Node* result = NULL;
- if (!has_context && !has_framestate && !has_control && !has_effect) {
- result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
- } else {
- int input_count_with_deps = value_input_count;
- if (has_context) ++input_count_with_deps;
- if (has_framestate) ++input_count_with_deps;
- if (has_control) ++input_count_with_deps;
- if (has_effect) ++input_count_with_deps;
- Node** buffer = EnsureInputBufferSize(input_count_with_deps);
- memcpy(buffer, value_inputs, kPointerSize * value_input_count);
- Node** current_input = buffer + value_input_count;
- if (has_context) {
- *current_input++ = current_context();
- }
- if (has_framestate) {
- // The frame state will be inserted later. Here we misuse
- // the dead_control node as a sentinel to be later overwritten
- // with the real frame state.
- *current_input++ = dead_control();
- }
- if (has_effect) {
- *current_input++ = environment_->GetEffectDependency();
- }
- if (has_control) {
- *current_input++ = environment_->GetControlDependency();
- }
- result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
- if (has_effect) {
- environment_->UpdateEffectDependency(result);
- }
- if (result->op()->ControlOutputCount() > 0 &&
- !environment()->IsMarkedAsUnreachable()) {
- environment_->UpdateControlDependency(result);
- }
- }
-
- return result;
-}
-
-
-void StructuredGraphBuilder::UpdateControlDependencyToLeaveFunction(
- Node* exit) {
- if (environment()->IsMarkedAsUnreachable()) return;
- if (exit_control() != NULL) {
- exit = MergeControl(exit_control(), exit);
- }
- environment()->MarkAsUnreachable();
- set_exit_control(exit);
-}
-
-
-StructuredGraphBuilder::Environment* StructuredGraphBuilder::CopyEnvironment(
- Environment* env) {
- return new (local_zone()) Environment(*env);
-}
-
-
-StructuredGraphBuilder::Environment::Environment(
- StructuredGraphBuilder* builder, Node* control_dependency)
- : builder_(builder),
- control_dependency_(control_dependency),
- effect_dependency_(control_dependency),
- values_(zone()) {}
-
-
-StructuredGraphBuilder::Environment::Environment(const Environment& copy)
- : builder_(copy.builder()),
- control_dependency_(copy.control_dependency_),
- effect_dependency_(copy.effect_dependency_),
- values_(copy.zone()) {
- const size_t kStackEstimate = 7; // optimum from experimentation!
- values_.reserve(copy.values_.size() + kStackEstimate);
- values_.insert(values_.begin(), copy.values_.begin(), copy.values_.end());
-}
-
-
-void StructuredGraphBuilder::Environment::Merge(Environment* other) {
- DCHECK(values_.size() == other->values_.size());
-
- // Nothing to do if the other environment is dead.
- if (other->IsMarkedAsUnreachable()) return;
-
- // Resurrect a dead environment by copying the contents of the other one and
- // placing a singleton merge as the new control dependency.
- if (this->IsMarkedAsUnreachable()) {
- Node* other_control = other->control_dependency_;
- Node* inputs[] = {other_control};
- control_dependency_ =
- graph()->NewNode(common()->Merge(1), arraysize(inputs), inputs, true);
- effect_dependency_ = other->effect_dependency_;
- values_ = other->values_;
- return;
- }
-
- // Create a merge of the control dependencies of both environments and update
- // the current environment's control dependency accordingly.
- Node* control = builder_->MergeControl(this->GetControlDependency(),
- other->GetControlDependency());
- UpdateControlDependency(control);
-
- // Create a merge of the effect dependencies of both environments and update
- // the current environment's effect dependency accordingly.
- Node* effect = builder_->MergeEffect(this->GetEffectDependency(),
- other->GetEffectDependency(), control);
- UpdateEffectDependency(effect);
-
- // Introduce Phi nodes for values that have differing input at merge points,
- // potentially extending an existing Phi node if possible.
- for (int i = 0; i < static_cast<int>(values_.size()); ++i) {
- values_[i] = builder_->MergeValue(values_[i], other->values_[i], control);
- }
-}
-
-
-void StructuredGraphBuilder::Environment::PrepareForLoop(BitVector* assigned) {
- Node* control = GetControlDependency();
- int size = static_cast<int>(values()->size());
- if (assigned == NULL) {
- // Assume that everything is updated in the loop.
- for (int i = 0; i < size; ++i) {
- Node* phi = builder_->NewPhi(1, values()->at(i), control);
- values()->at(i) = phi;
- }
- } else {
- // Only build phis for those locals assigned in this loop.
- for (int i = 0; i < size; ++i) {
- if (i < assigned->length() && !assigned->Contains(i)) continue;
- Node* phi = builder_->NewPhi(1, values()->at(i), control);
- values()->at(i) = phi;
- }
- }
- Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
- UpdateEffectDependency(effect);
-}
-
-
-Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) {
- const Operator* phi_op = common()->Phi(kMachAnyTagged, count);
- Node** buffer = EnsureInputBufferSize(count + 1);
- MemsetPointer(buffer, input, count);
- buffer[count] = control;
- return graph()->NewNode(phi_op, count + 1, buffer, true);
-}
-
-
-// TODO(mstarzinger): Revisit this once we have proper effect states.
-Node* StructuredGraphBuilder::NewEffectPhi(int count, Node* input,
- Node* control) {
- const Operator* phi_op = common()->EffectPhi(count);
- Node** buffer = EnsureInputBufferSize(count + 1);
- MemsetPointer(buffer, input, count);
- buffer[count] = control;
- return graph()->NewNode(phi_op, count + 1, buffer, true);
-}
-
-
-Node* StructuredGraphBuilder::MergeControl(Node* control, Node* other) {
- int inputs = control->op()->ControlInputCount() + 1;
- if (control->opcode() == IrOpcode::kLoop) {
- // Control node for loop exists, add input.
- const Operator* op = common()->Loop(inputs);
- control->AppendInput(graph_zone(), other);
- control->set_op(op);
- } else if (control->opcode() == IrOpcode::kMerge) {
- // Control node for merge exists, add input.
- const Operator* op = common()->Merge(inputs);
- control->AppendInput(graph_zone(), other);
- control->set_op(op);
- } else {
- // Control node is a singleton, introduce a merge.
- const Operator* op = common()->Merge(inputs);
- Node* inputs[] = {control, other};
- control = graph()->NewNode(op, arraysize(inputs), inputs, true);
- }
- return control;
-}
-
-
-Node* StructuredGraphBuilder::MergeEffect(Node* value, Node* other,
- Node* control) {
- int inputs = control->op()->ControlInputCount();
- if (value->opcode() == IrOpcode::kEffectPhi &&
- NodeProperties::GetControlInput(value) == control) {
- // Phi already exists, add input.
- value->set_op(common()->EffectPhi(inputs));
- value->InsertInput(graph_zone(), inputs - 1, other);
- } else if (value != other) {
- // Phi does not exist yet, introduce one.
- value = NewEffectPhi(inputs, value, control);
- value->ReplaceInput(inputs - 1, other);
- }
- return value;
-}
-
-
-Node* StructuredGraphBuilder::MergeValue(Node* value, Node* other,
- Node* control) {
- int inputs = control->op()->ControlInputCount();
- if (value->opcode() == IrOpcode::kPhi &&
- NodeProperties::GetControlInput(value) == control) {
- // Phi already exists, add input.
- value->set_op(common()->Phi(kMachAnyTagged, inputs));
- value->InsertInput(graph_zone(), inputs - 1, other);
- } else if (value != other) {
- // Phi does not exist yet, introduce one.
- value = NewPhi(inputs, value, control);
- value->ReplaceInput(inputs - 1, other);
- }
- return value;
-}
-
-
-Node* StructuredGraphBuilder::dead_control() {
- if (!dead_control_.is_set()) {
- Node* dead_node = graph()->NewNode(common_->Dead());
- dead_control_.set(dead_node);
- return dead_node;
- }
- return dead_control_.get();
-}
-}
-}
-} // namespace v8::internal::compiler
diff --git a/src/compiler/graph-builder.h b/src/compiler/graph-builder.h
deleted file mode 100644
index d88b125..0000000
--- a/src/compiler/graph-builder.h
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GRAPH_BUILDER_H_
-#define V8_COMPILER_GRAPH_BUILDER_H_
-
-#include "src/v8.h"
-
-#include "src/allocation.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
-#include "src/unique.h"
-
-namespace v8 {
-namespace internal {
-
-class BitVector;
-
-namespace compiler {
-
-class Node;
-
-// A common base class for anything that creates nodes in a graph.
-class GraphBuilder {
- public:
- explicit GraphBuilder(Graph* graph) : graph_(graph) {}
- virtual ~GraphBuilder() {}
-
- Node* NewNode(const Operator* op, bool incomplete = false) {
- return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
- }
-
- Node* NewNode(const Operator* op, Node* n1) {
- return MakeNode(op, 1, &n1, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2) {
- Node* buffer[] = {n1, n2};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
- Node* buffer[] = {n1, n2, n3};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
- Node* buffer[] = {n1, n2, n3, n4};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5) {
- Node* buffer[] = {n1, n2, n3, n4, n5};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6};
- return MakeNode(op, arraysize(nodes), nodes, false);
- }
-
- Node* NewNode(const Operator* op, int value_input_count, Node** value_inputs,
- bool incomplete = false) {
- return MakeNode(op, value_input_count, value_inputs, incomplete);
- }
-
- Graph* graph() const { return graph_; }
-
- protected:
- // Base implementation used by all factory methods.
- virtual Node* MakeNode(const Operator* op, int value_input_count,
- Node** value_inputs, bool incomplete) = 0;
-
- private:
- Graph* graph_;
-};
-
-
-// The StructuredGraphBuilder produces a high-level IR graph. It is used as the
-// base class for concrete implementations (e.g the AstGraphBuilder or the
-// StubGraphBuilder).
-class StructuredGraphBuilder : public GraphBuilder {
- public:
- StructuredGraphBuilder(Zone* zone, Graph* graph,
- CommonOperatorBuilder* common);
- ~StructuredGraphBuilder() OVERRIDE {}
-
- // Creates a new Phi node having {count} input values.
- Node* NewPhi(int count, Node* input, Node* control);
- Node* NewEffectPhi(int count, Node* input, Node* control);
-
- // Helpers for merging control, effect or value dependencies.
- Node* MergeControl(Node* control, Node* other);
- Node* MergeEffect(Node* value, Node* other, Node* control);
- Node* MergeValue(Node* value, Node* other, Node* control);
-
- // Helpers to create new control nodes.
- Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
- Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
- Node* NewMerge() { return NewNode(common()->Merge(1), true); }
- Node* NewLoop() { return NewNode(common()->Loop(1), true); }
- Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
- return NewNode(common()->Branch(hint), condition);
- }
-
- protected:
- class Environment;
- friend class Environment;
- friend class ControlBuilder;
-
- // The following method creates a new node having the specified operator and
- // ensures effect and control dependencies are wired up. The dependencies
- // tracked by the environment might be mutated.
- Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
- bool incomplete) FINAL;
-
- Environment* environment() const { return environment_; }
- void set_environment(Environment* env) { environment_ = env; }
-
- Node* current_context() const { return current_context_; }
- void set_current_context(Node* context) { current_context_ = context; }
-
- Node* exit_control() const { return exit_control_; }
- void set_exit_control(Node* node) { exit_control_ = node; }
-
- Node* dead_control();
-
- Zone* graph_zone() const { return graph()->zone(); }
- Zone* local_zone() const { return local_zone_; }
- Isolate* isolate() const { return graph_zone()->isolate(); }
- CommonOperatorBuilder* common() const { return common_; }
-
- // Helper to wrap a Handle<T> into a Unique<T>.
- template <class T>
- Unique<T> MakeUnique(Handle<T> object) {
- return Unique<T>::CreateUninitialized(object);
- }
-
- // Support for control flow builders. The concrete type of the environment
- // depends on the graph builder, but environments themselves are not virtual.
- virtual Environment* CopyEnvironment(Environment* env);
-
- // Helper to indicate a node exits the function body.
- void UpdateControlDependencyToLeaveFunction(Node* exit);
-
- private:
- CommonOperatorBuilder* common_;
- Environment* environment_;
-
- // Zone local to the builder for data not leaking into the graph.
- Zone* local_zone_;
-
- // Temporary storage for building node input lists.
- int input_buffer_size_;
- Node** input_buffer_;
-
- // Node representing the control dependency for dead code.
- SetOncePointer<Node> dead_control_;
-
- // Node representing the current context within the function body.
- Node* current_context_;
-
- // Merge of all control nodes that exit the function body.
- Node* exit_control_;
-
- // Growth increment for the temporary buffer used to construct input lists to
- // new nodes.
- static const int kInputBufferSizeIncrement = 64;
-
- Node** EnsureInputBufferSize(int size);
-
- DISALLOW_COPY_AND_ASSIGN(StructuredGraphBuilder);
-};
-
-
-// The abstract execution environment contains static knowledge about
-// execution state at arbitrary control-flow points. It allows for
-// simulation of the control-flow at compile time.
-class StructuredGraphBuilder::Environment : public ZoneObject {
- public:
- Environment(StructuredGraphBuilder* builder, Node* control_dependency);
- Environment(const Environment& copy);
-
- // Control dependency tracked by this environment.
- Node* GetControlDependency() { return control_dependency_; }
- void UpdateControlDependency(Node* dependency) {
- control_dependency_ = dependency;
- }
-
- // Effect dependency tracked by this environment.
- Node* GetEffectDependency() { return effect_dependency_; }
- void UpdateEffectDependency(Node* dependency) {
- effect_dependency_ = dependency;
- }
-
- // Mark this environment as being unreachable.
- void MarkAsUnreachable() {
- UpdateControlDependency(builder()->dead_control());
- }
- bool IsMarkedAsUnreachable() {
- return GetControlDependency()->opcode() == IrOpcode::kDead;
- }
-
- // Merge another environment into this one.
- void Merge(Environment* other);
-
- // Copies this environment at a control-flow split point.
- Environment* CopyForConditional() { return builder()->CopyEnvironment(this); }
-
- // Copies this environment to a potentially unreachable control-flow point.
- Environment* CopyAsUnreachable() {
- Environment* env = builder()->CopyEnvironment(this);
- env->MarkAsUnreachable();
- return env;
- }
-
- // Copies this environment at a loop header control-flow point.
- Environment* CopyForLoop(BitVector* assigned) {
- PrepareForLoop(assigned);
- return builder()->CopyEnvironment(this);
- }
-
- Node* GetContext() { return builder_->current_context(); }
-
- protected:
- Zone* zone() const { return builder_->local_zone(); }
- Graph* graph() const { return builder_->graph(); }
- StructuredGraphBuilder* builder() const { return builder_; }
- CommonOperatorBuilder* common() { return builder_->common(); }
- NodeVector* values() { return &values_; }
-
- // Prepare environment to be used as loop header.
- void PrepareForLoop(BitVector* assigned);
-
- private:
- StructuredGraphBuilder* builder_;
- Node* control_dependency_;
- Node* effect_dependency_;
- NodeVector values_;
-};
-}
-}
-} // namespace v8::internal::compiler
-
-#endif // V8_COMPILER_GRAPH_BUILDER_H__
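The StructuredGraphBuilder deleted above threaded the current effect and
control dependency through every node it created, with its Environment
recording that state at each control-flow point. A minimal sketch of the
pattern, using simplified hypothetical types rather than the V8 classes:

  // Sketch only: the environment tracks the latest effect/control dependency
  // so each newly created node is wired up in program order.
  struct Env {
    Node* effect;   // last side-effecting node
    Node* control;  // current control dependency
  };

  Node* EmitLoad(Graph* graph, Env* env, const Operator* load, Node* object) {
    // A load consumes the current effect and control inputs, then becomes
    // the new effect dependency itself.
    Node* node = graph->NewNode(load, object, env->effect, env->control);
    env->effect = node;
    return node;
  }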
diff --git a/src/compiler/graph-inl.h b/src/compiler/graph-inl.h
deleted file mode 100644
index c135ae5..0000000
--- a/src/compiler/graph-inl.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GRAPH_INL_H_
-#define V8_COMPILER_GRAPH_INL_H_
-
-#include "src/compiler/generic-algorithm.h"
-#include "src/compiler/graph.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <class Visitor>
-void Graph::VisitNodeInputsFromEnd(Visitor* visitor) {
- Zone tmp_zone(zone()->isolate());
- GenericGraphVisit::Visit<Visitor>(this, &tmp_zone, end(), visitor);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_GRAPH_INL_H_
diff --git a/src/compiler/graph-reducer.cc b/src/compiler/graph-reducer.cc
index 9a6b121..6f583d6 100644
--- a/src/compiler/graph-reducer.cc
+++ b/src/compiler/graph-reducer.cc
@@ -2,11 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/graph-reducer.h"
-
#include <functional>
+#include <limits>
-#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/verifier.h"
namespace v8 {
namespace internal {
@@ -20,14 +23,21 @@
};
-GraphReducer::GraphReducer(Graph* graph, Zone* zone)
+void Reducer::Finalize() {}
+
+
+GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead)
: graph_(graph),
+ dead_(dead),
state_(graph, 4),
reducers_(zone),
revisit_(zone),
stack_(zone) {}
+GraphReducer::~GraphReducer() {}
+
+
void GraphReducer::AddReducer(Reducer* reducer) {
reducers_.push_back(reducer);
}
@@ -51,7 +61,11 @@
Push(node);
}
} else {
- break;
+ // Run all finalizers.
+ for (Reducer* const reducer : reducers_) reducer->Finalize();
+
+ // Check if we have new nodes to revisit.
+ if (revisit_.empty()) break;
}
}
DCHECK(revisit_.empty());
@@ -112,8 +126,8 @@
if (input != node && Recurse(input)) return;
}
- // Remember the node count before reduction.
- const int node_count = graph()->NodeCount();
+ // Remember the max node id before reduction.
+ NodeId const max_id = static_cast<NodeId>(graph()->NodeCount() - 1);
// All inputs should be visited or on stack. Apply reductions to node.
Reduction reduction = Reduce(node);
@@ -135,35 +149,89 @@
// After reducing the node, pop it off the stack.
Pop();
- // Revisit all uses of the node.
- for (Node* const use : node->uses()) {
- // Don't revisit this node if it refers to itself.
- if (use != node) Revisit(use);
- }
-
// Check if we have a new replacement.
if (replacement != node) {
- if (node == graph()->start()) graph()->SetStart(replacement);
- if (node == graph()->end()) graph()->SetEnd(replacement);
- // If {node} was replaced by an old node, unlink {node} and assume that
- // {replacement} was already reduced and finish.
- if (replacement->id() < node_count) {
- node->ReplaceUses(replacement);
- node->Kill();
- } else {
- // Otherwise {node} was replaced by a new node. Replace all old uses of
- // {node} with {replacement}. New nodes created by this reduction can
- // use {node}.
- node->ReplaceUsesIf(
- [node_count](Node* const node) { return node->id() < node_count; },
- replacement);
- // Unlink {node} if it's no longer used.
- if (node->uses().empty()) {
- node->Kill();
- }
+ Replace(node, replacement, max_id);
+ } else {
+ // Revisit all uses of the node.
+ for (Node* const user : node->uses()) {
+ // Don't revisit this node if it refers to itself.
+ if (user != node) Revisit(user);
+ }
+ }
+}
- // If there was a replacement, reduce it after popping {node}.
- Recurse(replacement);
+
+void GraphReducer::Replace(Node* node, Node* replacement) {
+ Replace(node, replacement, std::numeric_limits<NodeId>::max());
+}
+
+
+void GraphReducer::Replace(Node* node, Node* replacement, NodeId max_id) {
+ if (node == graph()->start()) graph()->SetStart(replacement);
+ if (node == graph()->end()) graph()->SetEnd(replacement);
+ if (replacement->id() <= max_id) {
+ // {replacement} is an old node, so unlink {node} and assume that
+ // {replacement} was already reduced and finish.
+ for (Edge edge : node->use_edges()) {
+ Node* const user = edge.from();
+ Verifier::VerifyEdgeInputReplacement(edge, replacement);
+ edge.UpdateTo(replacement);
+ // Don't revisit this node if it refers to itself.
+ if (user != node) Revisit(user);
+ }
+ node->Kill();
+ } else {
+ // Replace all old uses of {node} with {replacement}, but allow new nodes
+ // created by this reduction to use {node}.
+ for (Edge edge : node->use_edges()) {
+ Node* const user = edge.from();
+ if (user->id() <= max_id) {
+ edge.UpdateTo(replacement);
+ // Don't revisit this node if it refers to itself.
+ if (user != node) Revisit(user);
+ }
+ }
+ // Unlink {node} if it's no longer used.
+ if (node->uses().empty()) node->Kill();
+
+ // If there was a replacement, reduce it after popping {node}.
+ Recurse(replacement);
+ }
+}
+
+
+void GraphReducer::ReplaceWithValue(Node* node, Node* value, Node* effect,
+ Node* control) {
+ if (effect == nullptr && node->op()->EffectInputCount() > 0) {
+ effect = NodeProperties::GetEffectInput(node);
+ }
+ if (control == nullptr && node->op()->ControlInputCount() > 0) {
+ control = NodeProperties::GetControlInput(node);
+ }
+
+ // Requires distinguishing between value, effect and control edges.
+ for (Edge edge : node->use_edges()) {
+ Node* const user = edge.from();
+ DCHECK(!user->IsDead());
+ if (NodeProperties::IsControlEdge(edge)) {
+ if (user->opcode() == IrOpcode::kIfSuccess) {
+ Replace(user, control);
+ } else if (user->opcode() == IrOpcode::kIfException) {
+ DCHECK_NOT_NULL(dead_);
+ edge.UpdateTo(dead_);
+ Revisit(user);
+ } else {
+ UNREACHABLE();
+ }
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ DCHECK_NOT_NULL(effect);
+ edge.UpdateTo(effect);
+ Revisit(user);
+ } else {
+ DCHECK_NOT_NULL(value);
+ edge.UpdateTo(value);
+ Revisit(user);
}
}
}
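The {max_id} threshold above distinguishes a replacement that already existed
before the reduction from one the reduction just created. A hedged sketch of
the predicate, with invented node ids:

  // Not V8 code; NodeId is plain uint32_t in this CL.
  bool ShouldRewireUse(uint32_t user_id, uint32_t max_id) {
    // Pre-existing users are rewired to the replacement; nodes created by
    // the reduction itself (id > max_id) may keep the old node as an input.
    return user_id <= max_id;
  }

  // Example: max_id == 9 while node #7 is being reduced. An old replacement
  // #3 takes over every use and #7 is killed; a fresh replacement #10 only
  // takes over uses in nodes #0..#9.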
diff --git a/src/compiler/graph-reducer.h b/src/compiler/graph-reducer.h
index 09a650c..683c345 100644
--- a/src/compiler/graph-reducer.h
+++ b/src/compiler/graph-reducer.h
@@ -5,20 +5,30 @@
#ifndef V8_COMPILER_GRAPH_REDUCER_H_
#define V8_COMPILER_GRAPH_REDUCER_H_
-#include "src/compiler/graph.h"
+#include "src/compiler/node-marker.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
+class Graph;
+class Node;
+
+
+// NodeIds are identifying numbers for nodes that can be used to index auxiliary
+// out-of-line data associated with each node.
+typedef uint32_t NodeId;
+
+
// Represents the result of trying to reduce a node in the graph.
-class Reduction FINAL {
+class Reduction final {
public:
- explicit Reduction(Node* replacement = NULL) : replacement_(replacement) {}
+ explicit Reduction(Node* replacement = nullptr) : replacement_(replacement) {}
Node* replacement() const { return replacement_; }
- bool Changed() const { return replacement() != NULL; }
+ bool Changed() const { return replacement() != nullptr; }
private:
Node* replacement_;
@@ -32,26 +42,88 @@
// phase.
class Reducer {
public:
- Reducer() {}
virtual ~Reducer() {}
// Try to reduce a node if possible.
virtual Reduction Reduce(Node* node) = 0;
+ // Invoked by the {GraphReducer} when all nodes are done. Can be used to
+ // do additional reductions at the end, which in turn can cause a new round
+ // of reductions.
+ virtual void Finalize();
+
// Helper functions for subclasses to produce reductions for a node.
static Reduction NoChange() { return Reduction(); }
static Reduction Replace(Node* node) { return Reduction(node); }
static Reduction Changed(Node* node) { return Reduction(node); }
+};
+
+
+// An advanced reducer can also edit the graphs by changing and replacing nodes
+// other than the one currently being reduced.
+class AdvancedReducer : public Reducer {
+ public:
+ // Observe the actions of this reducer.
+ class Editor {
+ public:
+ virtual ~Editor() {}
+
+ // Replace {node} with {replacement}.
+ virtual void Replace(Node* node, Node* replacement) = 0;
+ // Revisit the {node} again later.
+ virtual void Revisit(Node* node) = 0;
+ // Replace value uses of {node} with {value} and effect uses of {node} with
+ // {effect}. If {effect == nullptr}, then use the effect input to {node}.
+ // All control uses will be relaxed assuming {node} cannot throw.
+ virtual void ReplaceWithValue(Node* node, Node* value, Node* effect,
+ Node* control) = 0;
+ };
+
+ explicit AdvancedReducer(Editor* editor) : editor_(editor) {}
+
+ protected:
+ // Helper functions for subclasses to produce reductions for a node.
+ static Reduction Replace(Node* node) { return Reducer::Replace(node); }
+
+ // Helper functions for subclasses to edit the graph.
+ void Replace(Node* node, Node* replacement) {
+ DCHECK_NOT_NULL(editor_);
+ editor_->Replace(node, replacement);
+ }
+ void Revisit(Node* node) {
+ DCHECK_NOT_NULL(editor_);
+ editor_->Revisit(node);
+ }
+ void ReplaceWithValue(Node* node, Node* value, Node* effect = nullptr,
+ Node* control = nullptr) {
+ DCHECK_NOT_NULL(editor_);
+ editor_->ReplaceWithValue(node, value, effect, control);
+ }
+
+ // Relax the effects of {node} by immediately replacing effect and control
+ // uses of {node} with the effect and control input to {node}.
+ // TODO(turbofan): replace the effect input to {node} with {graph->start()}.
+ void RelaxEffectsAndControls(Node* node) {
+ ReplaceWithValue(node, node, nullptr, nullptr);
+ }
+
+ // Relax the control uses of {node} by immediately replacing them with the
+ // control input to {node}.
+ void RelaxControls(Node* node) {
+ ReplaceWithValue(node, node, node, nullptr);
+ }
private:
- DISALLOW_COPY_AND_ASSIGN(Reducer);
+ Editor* const editor_;
};
// Performs an iterative reduction of a node graph.
-class GraphReducer FINAL {
+class GraphReducer : public AdvancedReducer::Editor {
public:
- GraphReducer(Graph* graph, Zone* zone);
+ GraphReducer(Zone* zone, Graph* graph, Node* dead = nullptr);
+ ~GraphReducer();
Graph* graph() const { return graph_; }
@@ -74,15 +146,30 @@
// Reduce the node on top of the stack.
void ReduceTop();
+ // Replace {node} with {replacement}.
+ void Replace(Node* node, Node* replacement) final;
+
+ // Replace value uses of {node} with {value} and effect uses of {node} with
+ // {effect}. If {effect == nullptr}, then use the effect input to {node}. All
+ // control uses will be relaxed assuming {node} cannot throw.
+ void ReplaceWithValue(Node* node, Node* value, Node* effect,
+ Node* control) final;
+
+ // Replace all uses of {node} with {replacement} if the id of {replacement} is
+ // less than or equal to {max_id}. Otherwise, only replace those uses of
+ // {node} whose using node's id is less than or equal to {max_id}.
+ void Replace(Node* node, Node* replacement, NodeId max_id);
+
// Node stack operations.
void Pop();
void Push(Node* node);
// Revisit queue operations.
bool Recurse(Node* node);
- void Revisit(Node* node);
+ void Revisit(Node* node) final;
- Graph* graph_;
+ Graph* const graph_;
+ Node* const dead_;
NodeMarker<State> state_;
ZoneVector<Reducer*> reducers_;
ZoneStack<Node*> revisit_;
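A hedged sketch of a client of the new AdvancedReducer interface (the reducer
and its purity test are hypothetical; only the helper calls come from this
header):

  class PureNodeRelaxer final : public AdvancedReducer {
   public:
    explicit PureNodeRelaxer(Editor* editor) : AdvancedReducer(editor) {}

    Reduction Reduce(Node* node) final {
      if (!IsPure(node)) return NoChange();
      // Rewire effect and control users past {node}; value uses are kept.
      RelaxEffectsAndControls(node);
      return Changed(node);
    }

   private:
    static bool IsPure(Node*) { return false; }  // stand-in predicate
  };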
diff --git a/src/compiler/graph-replay.cc b/src/compiler/graph-replay.cc
index 3a0b783..7f4cc95 100644
--- a/src/compiler/graph-replay.cc
+++ b/src/compiler/graph-replay.cc
@@ -4,9 +4,9 @@
#include "src/compiler/graph-replay.h"
+#include "src/compiler/all-nodes.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/compiler/operator-properties.h"
@@ -19,46 +19,51 @@
void GraphReplayPrinter::PrintReplay(Graph* graph) {
GraphReplayPrinter replay;
- PrintF(" Node* nil = graph.NewNode(common_builder.Dead());\n");
- graph->VisitNodeInputsFromEnd(&replay);
-}
+ PrintF(" Node* nil = graph()->NewNode(common()->Dead());\n");
+ Zone zone;
+ AllNodes nodes(&zone, graph);
-
-void GraphReplayPrinter::Pre(Node* node) {
- PrintReplayOpCreator(node->op());
- PrintF(" Node* n%d = graph.NewNode(op", node->id());
- for (int i = 0; i < node->InputCount(); ++i) {
- PrintF(", nil");
+ // Allocate the nodes first.
+ for (Node* node : nodes.live) {
+ PrintReplayOpCreator(node->op());
+ PrintF(" Node* n%d = graph()->NewNode(op", node->id());
+ for (int i = 0; i < node->InputCount(); ++i) {
+ PrintF(", nil");
+ }
+ PrintF("); USE(n%d);\n", node->id());
}
- PrintF("); USE(n%d);\n", node->id());
-}
-
-void GraphReplayPrinter::PostEdge(Node* from, int index, Node* to) {
- PrintF(" n%d->ReplaceInput(%d, n%d);\n", from->id(), index, to->id());
+ // Connect the nodes to their inputs.
+ for (Node* node : nodes.live) {
+ for (int i = 0; i < node->InputCount(); i++) {
+ PrintF(" n%d->ReplaceInput(%d, n%d);\n", node->id(), i,
+ node->InputAt(i)->id());
+ }
+ }
}
void GraphReplayPrinter::PrintReplayOpCreator(const Operator* op) {
IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
- const char* builder =
- IrOpcode::IsCommonOpcode(opcode) ? "common_builder" : "js_builder";
+ const char* builder = IrOpcode::IsCommonOpcode(opcode) ? "common" : "js";
const char* mnemonic = IrOpcode::IsCommonOpcode(opcode)
? IrOpcode::Mnemonic(opcode)
: IrOpcode::Mnemonic(opcode) + 2;
- PrintF(" op = %s.%s(", builder, mnemonic);
+ PrintF(" op = %s()->%s(", builder, mnemonic);
switch (opcode) {
case IrOpcode::kParameter:
- case IrOpcode::kNumberConstant:
- PrintF("0");
+ PrintF("%d", ParameterIndexOf(op));
break;
- case IrOpcode::kLoad:
- PrintF("unique_name");
+ case IrOpcode::kNumberConstant:
+ PrintF("%g", OpParameter<double>(op));
break;
case IrOpcode::kHeapConstant:
PrintF("unique_constant");
break;
case IrOpcode::kPhi:
+ PrintF("kMachAnyTagged, %d", op->ValueInputCount());
+ break;
+ case IrOpcode::kStateValues:
PrintF("%d", op->ValueInputCount());
break;
case IrOpcode::kEffectPhi:
@@ -68,6 +73,12 @@
case IrOpcode::kMerge:
PrintF("%d", op->ControlInputCount());
break;
+ case IrOpcode::kStart:
+ PrintF("%d", op->ValueOutputCount() - 3);
+ break;
+ case IrOpcode::kFrameState:
+ PrintF("JS_FRAME, BailoutId(-1), OutputFrameStateCombine::Ignore()");
+ break;
default:
break;
}
@@ -75,6 +86,7 @@
}
#endif // DEBUG
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/graph-replay.h b/src/compiler/graph-replay.h
index f41311e..be89ebd 100644
--- a/src/compiler/graph-replay.h
+++ b/src/compiler/graph-replay.h
@@ -5,7 +5,6 @@
#ifndef V8_COMPILER_GRAPH_REPLAY_H_
#define V8_COMPILER_GRAPH_REPLAY_H_
-#include "src/compiler/generic-algorithm.h"
#include "src/compiler/node.h"
namespace v8 {
@@ -18,7 +17,7 @@
// Helper class to print a full replay of a graph. This replay can be used to
// materialize the same graph within a C++ unit test and hence test subsequent
// optimization passes on a graph without going through the construction steps.
-class GraphReplayPrinter FINAL : public NullNodeVisitor {
+class GraphReplayPrinter {
public:
#ifdef DEBUG
static void PrintReplay(Graph* graph);
@@ -26,9 +25,6 @@
static void PrintReplay(Graph* graph) {}
#endif
- void Pre(Node* node);
- void PostEdge(Node* from, int index, Node* to);
-
private:
GraphReplayPrinter() {}
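The replay that PrintReplay() emits is itself C++ source. An illustrative
sample for a tiny graph, abridged and with invented ids and operators, but
shaped by the PrintF calls in graph-replay.cc above:

  Node* nil = graph()->NewNode(common()->Dead());
  op = common()->Parameter(0);
  Node* n1 = graph()->NewNode(op); USE(n1);
  op = common()->Return();
  Node* n3 = graph()->NewNode(op, nil, nil, nil); USE(n3);
  n3->ReplaceInput(0, n1);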
diff --git a/src/compiler/graph-trimmer.cc b/src/compiler/graph-trimmer.cc
new file mode 100644
index 0000000..5fae425
--- /dev/null
+++ b/src/compiler/graph-trimmer.cc
@@ -0,0 +1,48 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-trimmer.h"
+
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+GraphTrimmer::GraphTrimmer(Zone* zone, Graph* graph)
+ : graph_(graph), is_live_(graph, 2), live_(zone) {
+ live_.reserve(graph->NodeCount());
+}
+
+
+GraphTrimmer::~GraphTrimmer() {}
+
+
+void GraphTrimmer::TrimGraph() {
+ // Mark end node as live.
+ MarkAsLive(graph()->end());
+ // Compute transitive closure of live nodes.
+ for (size_t i = 0; i < live_.size(); ++i) {
+ for (Node* const input : live_[i]->inputs()) MarkAsLive(input);
+ }
+ // Remove dead->live edges.
+ for (Node* const live : live_) {
+ DCHECK(IsLive(live));
+ for (Edge edge : live->use_edges()) {
+ Node* const user = edge.from();
+ if (!IsLive(user)) {
+ if (FLAG_trace_turbo_reduction) {
+ OFStream os(stdout);
+ os << "DeadLink: " << *user << "(" << edge.index() << ") -> " << *live
+ << std::endl;
+ }
+ edge.UpdateTo(nullptr);
+ }
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/graph-trimmer.h b/src/compiler/graph-trimmer.h
new file mode 100644
index 0000000..d8258be
--- /dev/null
+++ b/src/compiler/graph-trimmer.h
@@ -0,0 +1,57 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_TRIMMER_H_
+#define V8_COMPILER_GRAPH_TRIMMER_H_
+
+#include "src/compiler/node-marker.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+
+
+// Trims dead nodes from the node graph.
+class GraphTrimmer final {
+ public:
+ GraphTrimmer(Zone* zone, Graph* graph);
+ ~GraphTrimmer();
+
+ // Trim nodes in the {graph} that are not reachable from {graph->end()}.
+ void TrimGraph();
+
+ // Trim nodes in the {graph} that are not reachable from either {graph->end()}
+ // or any of the roots in the sequence [{begin},{end}[.
+ template <typename ForwardIterator>
+ void TrimGraph(ForwardIterator begin, ForwardIterator end) {
+ while (begin != end) MarkAsLive(*begin++);
+ TrimGraph();
+ }
+
+ private:
+ V8_INLINE bool IsLive(Node* const node) { return is_live_.Get(node); }
+ V8_INLINE void MarkAsLive(Node* const node) {
+ if (!node->IsDead() && !IsLive(node)) {
+ is_live_.Set(node, true);
+ live_.push_back(node);
+ }
+ }
+
+ Graph* graph() const { return graph_; }
+
+ Graph* const graph_;
+ NodeMarker<bool> is_live_;
+ NodeVector live_;
+
+ DISALLOW_COPY_AND_ASSIGN(GraphTrimmer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_GRAPH_TRIMMER_H_
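A hedged usage sketch for the new trimmer; the zone, graph, and extra roots
are assumed to come from the surrounding pipeline, and the root names here
are hypothetical:

  GraphTrimmer trimmer(temp_zone, graph);
  // Keep these alive even if they are unreachable from graph->end():
  Node* roots[] = {frame_state, osr_loop_entry};
  trimmer.TrimGraph(&roots[0], &roots[arraysize(roots)]);
  // Plain TrimGraph() keeps only what end() reaches and nulls out the
  // remaining dead->live edges, per graph-trimmer.cc above.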
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index e018c7a..0785176 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -8,80 +8,55 @@
#include <string>
#include "src/code-stubs.h"
+#include "src/compiler/all-nodes.h"
#include "src/compiler/graph.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
+#include "src/interpreter/bytecodes.h"
#include "src/ostreams.h"
namespace v8 {
namespace internal {
namespace compiler {
-static int SafeId(Node* node) { return node == NULL ? -1 : node->id(); }
+
+FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
+ const char* suffix, const char* mode) {
+ EmbeddedVector<char, 256> filename(0);
+ base::SmartArrayPointer<char> debug_name = info->GetDebugName();
+ if (strlen(debug_name.get()) > 0) {
+ SNPrintF(filename, "turbo-%s", debug_name.get());
+ } else if (info->has_shared_info()) {
+ SNPrintF(filename, "turbo-%p", static_cast<void*>(info));
+ } else {
+ SNPrintF(filename, "turbo-none-%s", phase);
+ }
+ std::replace(filename.start(), filename.start() + filename.length(), ' ',
+ '_');
+
+ EmbeddedVector<char, 256> full_filename;
+ if (phase == nullptr) {
+ SNPrintF(full_filename, "%s.%s", filename.start(), suffix);
+ } else {
+ SNPrintF(full_filename, "%s-%s.%s", filename.start(), phase, suffix);
+ }
+ return base::OS::FOpen(full_filename.start(), mode);
+}
+
+
+static int SafeId(Node* node) { return node == nullptr ? -1 : node->id(); }
static const char* SafeMnemonic(Node* node) {
- return node == NULL ? "null" : node->op()->mnemonic();
+ return node == nullptr ? "null" : node->op()->mnemonic();
}
#define DEAD_COLOR "#999999"
-class AllNodes {
- public:
- enum State { kDead, kGray, kLive };
-
- AllNodes(Zone* local_zone, const Graph* graph)
- : state(graph->NodeCount(), kDead, local_zone),
- live(local_zone),
- gray(local_zone) {
- Node* end = graph->end();
- state[end->id()] = kLive;
- live.push_back(end);
- // Find all live nodes reachable from end.
- for (size_t i = 0; i < live.size(); i++) {
- for (Node* const input : live[i]->inputs()) {
- if (input == NULL) {
- // TODO(titzer): print a warning.
- continue;
- }
- if (input->id() >= graph->NodeCount()) {
- // TODO(titzer): print a warning.
- continue;
- }
- if (state[input->id()] != kLive) {
- live.push_back(input);
- state[input->id()] = kLive;
- }
- }
- }
-
- // Find all nodes that are not reachable from end that use live nodes.
- for (size_t i = 0; i < live.size(); i++) {
- for (Node* const use : live[i]->uses()) {
- if (state[use->id()] == kDead) {
- gray.push_back(use);
- state[use->id()] = kGray;
- }
- }
- }
- }
-
- bool IsLive(Node* node) {
- return node != NULL && node->id() < static_cast<int>(state.size()) &&
- state[node->id()] == kLive;
- }
-
- ZoneVector<State> state;
- NodeVector live;
- NodeVector gray;
-};
-
-
class Escaped {
public:
explicit Escaped(const std::ostringstream& os,
@@ -111,25 +86,27 @@
class JSONGraphNodeWriter {
public:
- JSONGraphNodeWriter(std::ostream& os, Zone* zone, const Graph* graph)
- : os_(os), all_(zone, graph), first_node_(true) {}
+ JSONGraphNodeWriter(std::ostream& os, Zone* zone, const Graph* graph,
+ const SourcePositionTable* positions)
+ : os_(os), all_(zone, graph), positions_(positions), first_node_(true) {}
void Print() {
for (Node* const node : all_.live) PrintNode(node);
+ os_ << "\n";
}
void PrintNode(Node* node) {
if (first_node_) {
first_node_ = false;
} else {
- os_ << ",";
+ os_ << ",\n";
}
std::ostringstream label;
label << *node->op();
os_ << "{\"id\":" << SafeId(node) << ",\"label\":\"" << Escaped(label, "\"")
<< "\"";
IrOpcode::Value opcode = node->opcode();
- if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+ if (IrOpcode::IsPhiOpcode(opcode)) {
os_ << ",\"rankInputs\":[0," << NodeProperties::FirstControlIndex(node)
<< "]";
os_ << ",\"rankWithInput\":[" << NodeProperties::FirstControlIndex(node)
@@ -142,15 +119,26 @@
if (opcode == IrOpcode::kBranch) {
os_ << ",\"rankInputs\":[0]";
}
+ SourcePosition position = positions_->GetSourcePosition(node);
+ if (position.IsKnown()) {
+ os_ << ",\"pos\":" << position.raw();
+ }
os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
os_ << ",\"control\":" << (NodeProperties::IsControl(node) ? "true"
: "false");
+ if (NodeProperties::IsTyped(node)) {
+ Type* type = NodeProperties::GetType(node);
+ std::ostringstream type_out;
+ type->PrintTo(type_out);
+ os_ << ",\"type\":\"" << Escaped(type_out, "\"") << "\"";
+ }
os_ << "}";
}
private:
std::ostream& os_;
AllNodes all_;
+ const SourcePositionTable* positions_;
bool first_node_;
DISALLOW_COPY_AND_ASSIGN(JSONGraphNodeWriter);
@@ -164,12 +152,13 @@
void Print() {
for (Node* const node : all_.live) PrintEdges(node);
+ os_ << "\n";
}
void PrintEdges(Node* node) {
for (int i = 0; i < node->InputCount(); i++) {
Node* input = node->InputAt(i);
- if (input == NULL) continue;
+ if (input == nullptr) continue;
PrintEdge(node, i, input);
}
}
@@ -178,9 +167,9 @@
if (first_edge_) {
first_edge_ = false;
} else {
- os_ << ",";
+ os_ << ",\n";
}
- const char* edge_type = NULL;
+ const char* edge_type = nullptr;
if (index < NodeProperties::FirstValueIndex(from)) {
edge_type = "unknown";
} else if (index < NodeProperties::FirstContextIndex(from)) {
@@ -208,195 +197,16 @@
std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
- Zone tmp_zone(ad.graph.zone()->isolate());
- os << "{\"nodes\":[";
- JSONGraphNodeWriter(os, &tmp_zone, &ad.graph).Print();
- os << "],\"edges\":[";
+ Zone tmp_zone;
+ os << "{\n\"nodes\":[";
+ JSONGraphNodeWriter(os, &tmp_zone, &ad.graph, ad.positions).Print();
+ os << "],\n\"edges\":[";
JSONGraphEdgeWriter(os, &tmp_zone, &ad.graph).Print();
os << "]}";
return os;
}
-class GraphVisualizer {
- public:
- GraphVisualizer(std::ostream& os, Zone* zone, const Graph* graph)
- : all_(zone, graph), os_(os) {}
-
- void Print();
-
- void PrintNode(Node* node, bool gray);
-
- private:
- void PrintEdge(Edge edge);
-
- AllNodes all_;
- std::ostream& os_;
-
- DISALLOW_COPY_AND_ASSIGN(GraphVisualizer);
-};
-
-
-static Node* GetControlCluster(Node* node) {
- if (OperatorProperties::IsBasicBlockBegin(node->op())) {
- return node;
- } else if (node->op()->ControlInputCount() == 1) {
- Node* control = NodeProperties::GetControlInput(node, 0);
- return control != NULL &&
- OperatorProperties::IsBasicBlockBegin(control->op())
- ? control
- : NULL;
- } else {
- return NULL;
- }
-}
-
-
-void GraphVisualizer::PrintNode(Node* node, bool gray) {
- Node* control_cluster = GetControlCluster(node);
- if (control_cluster != NULL) {
- os_ << " subgraph cluster_BasicBlock" << control_cluster->id() << " {\n";
- }
- os_ << " ID" << SafeId(node) << " [\n";
-
- os_ << " shape=\"record\"\n";
- switch (node->opcode()) {
- case IrOpcode::kEnd:
- case IrOpcode::kDead:
- case IrOpcode::kStart:
- os_ << " style=\"diagonals\"\n";
- break;
- case IrOpcode::kMerge:
- case IrOpcode::kIfTrue:
- case IrOpcode::kIfFalse:
- case IrOpcode::kLoop:
- os_ << " style=\"rounded\"\n";
- break;
- default:
- break;
- }
-
- if (gray) {
- os_ << " style=\"filled\"\n"
- << " fillcolor=\"" DEAD_COLOR "\"\n";
- }
-
- std::ostringstream label;
- label << *node->op();
- os_ << " label=\"{{#" << SafeId(node) << ":" << Escaped(label);
-
- auto i = node->input_edges().begin();
- for (int j = node->op()->ValueInputCount(); j > 0; ++i, j--) {
- os_ << "|<I" << (*i).index() << ">#" << SafeId((*i).to());
- }
- for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
- ++i, j--) {
- os_ << "|<I" << (*i).index() << ">X #" << SafeId((*i).to());
- }
- for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
- ++i, j--) {
- os_ << "|<I" << (*i).index() << ">F #" << SafeId((*i).to());
- }
- for (int j = node->op()->EffectInputCount(); j > 0; ++i, j--) {
- os_ << "|<I" << (*i).index() << ">E #" << SafeId((*i).to());
- }
-
- if (OperatorProperties::IsBasicBlockBegin(node->op()) ||
- GetControlCluster(node) == NULL) {
- for (int j = node->op()->ControlInputCount(); j > 0; ++i, j--) {
- os_ << "|<I" << (*i).index() << ">C #" << SafeId((*i).to());
- }
- }
- os_ << "}";
-
- if (FLAG_trace_turbo_types && NodeProperties::IsTyped(node)) {
- Bounds bounds = NodeProperties::GetBounds(node);
- std::ostringstream upper;
- bounds.upper->PrintTo(upper);
- std::ostringstream lower;
- bounds.lower->PrintTo(lower);
- os_ << "|" << Escaped(upper) << "|" << Escaped(lower);
- }
- os_ << "}\"\n";
-
- os_ << " ]\n";
- if (control_cluster != NULL) os_ << " }\n";
-}
-
-
-static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
- if (from->opcode() == IrOpcode::kPhi ||
- from->opcode() == IrOpcode::kEffectPhi) {
- Node* control = NodeProperties::GetControlInput(from, 0);
- return control != NULL && control->opcode() != IrOpcode::kMerge &&
- control != to && index != 0;
- } else if (from->opcode() == IrOpcode::kLoop) {
- return index != 0;
- } else {
- return false;
- }
-}
-
-
-void GraphVisualizer::PrintEdge(Edge edge) {
- Node* from = edge.from();
- int index = edge.index();
- Node* to = edge.to();
-
- if (!all_.IsLive(to)) return; // skip inputs that point to dead or NULL.
-
- bool unconstrained = IsLikelyBackEdge(from, index, to);
- os_ << " ID" << SafeId(from);
-
- if (OperatorProperties::IsBasicBlockBegin(from->op()) ||
- GetControlCluster(from) == NULL ||
- (from->op()->ControlInputCount() > 0 &&
- NodeProperties::GetControlInput(from) != to)) {
- os_ << ":I" << index << ":n -> ID" << SafeId(to) << ":s"
- << "[" << (unconstrained ? "constraint=false, " : "")
- << (NodeProperties::IsControlEdge(edge) ? "style=bold, " : "")
- << (NodeProperties::IsEffectEdge(edge) ? "style=dotted, " : "")
- << (NodeProperties::IsContextEdge(edge) ? "style=dashed, " : "") << "]";
- } else {
- os_ << " -> ID" << SafeId(to) << ":s [color=transparent, "
- << (unconstrained ? "constraint=false, " : "")
- << (NodeProperties::IsControlEdge(edge) ? "style=dashed, " : "") << "]";
- }
- os_ << "\n";
-}
-
-
-void GraphVisualizer::Print() {
- os_ << "digraph D {\n"
- << " node [fontsize=8,height=0.25]\n"
- << " rankdir=\"BT\"\n"
- << " ranksep=\"1.2 equally\"\n"
- << " overlap=\"false\"\n"
- << " splines=\"true\"\n"
- << " concentrate=\"true\"\n"
- << " \n";
-
- // Make sure all nodes have been output before writing out the edges.
- for (Node* const node : all_.live) PrintNode(node, false);
- for (Node* const node : all_.gray) PrintNode(node, true);
-
- // With all the nodes written, add the edges.
- for (Node* const node : all_.live) {
- for (Edge edge : node->use_edges()) {
- PrintEdge(edge);
- }
- }
- os_ << "}\n";
-}
-
-
-std::ostream& operator<<(std::ostream& os, const AsDOT& ad) {
- Zone tmp_zone(ad.graph.zone()->isolate());
- GraphVisualizer(os, &tmp_zone, &ad.graph).Print();
- return os;
-}
-
-
class GraphC1Visualizer {
public:
GraphC1Visualizer(std::ostream& os, Zone* zone); // NOLINT
@@ -405,7 +215,7 @@
void PrintSchedule(const char* phase, const Schedule* schedule,
const SourcePositionTable* positions,
const InstructionSequence* instructions);
- void PrintAllocator(const char* phase, const RegisterAllocator* allocator);
+ void PrintLiveRanges(const char* phase, const RegisterAllocationData* data);
Zone* zone() const { return zone_; }
private:
@@ -413,15 +223,18 @@
void PrintStringProperty(const char* name, const char* value);
void PrintLongProperty(const char* name, int64_t value);
void PrintIntProperty(const char* name, int value);
- void PrintBlockProperty(const char* name, BasicBlock::Id block_id);
+ void PrintBlockProperty(const char* name, int rpo_number);
void PrintNodeId(Node* n);
void PrintNode(Node* n);
void PrintInputs(Node* n);
- void PrintInputs(InputIter* i, int count, const char* prefix);
+ template <typename InputIterator>
+ void PrintInputs(InputIterator* i, int count, const char* prefix);
void PrintType(Node* node);
- void PrintLiveRange(LiveRange* range, const char* type);
- class Tag FINAL BASE_EMBEDDED {
+ void PrintLiveRange(LiveRange* range, const char* type, int vreg);
+ void PrintLiveRangeChain(TopLevelLiveRange* range, const char* type);
+
+ class Tag final BASE_EMBEDDED {
public:
Tag(GraphC1Visualizer* visualizer, const char* name) {
name_ = name;
@@ -475,10 +288,9 @@
}
-void GraphC1Visualizer::PrintBlockProperty(const char* name,
- BasicBlock::Id block_id) {
+void GraphC1Visualizer::PrintBlockProperty(const char* name, int rpo_number) {
PrintIndent();
- os_ << name << " \"B" << block_id << "\"\n";
+ os_ << name << " \"B" << rpo_number << "\"\n";
}
@@ -490,15 +302,14 @@
void GraphC1Visualizer::PrintCompilation(const CompilationInfo* info) {
Tag tag(this, "compilation");
+ base::SmartArrayPointer<char> name = info->GetDebugName();
if (info->IsOptimizing()) {
- Handle<String> name = info->function()->debug_name();
- PrintStringProperty("name", name->ToCString().get());
+ PrintStringProperty("name", name.get());
PrintIndent();
- os_ << "method \"" << name->ToCString().get() << ":"
- << info->optimization_id() << "\"\n";
+ os_ << "method \"" << name.get() << ":" << info->optimization_id()
+ << "\"\n";
} else {
- CodeStub::Major major_key = info->code_stub()->MajorKey();
- PrintStringProperty("name", CodeStub::MajorName(major_key, false));
+ PrintStringProperty("name", name.get());
PrintStringProperty("method", "stub");
}
PrintLongProperty("date",
@@ -516,7 +327,8 @@
}
-void GraphC1Visualizer::PrintInputs(InputIter* i, int count,
+template <typename InputIterator>
+void GraphC1Visualizer::PrintInputs(InputIterator* i, int count,
const char* prefix) {
if (count > 0) {
os_ << prefix;
@@ -544,11 +356,9 @@
void GraphC1Visualizer::PrintType(Node* node) {
if (NodeProperties::IsTyped(node)) {
- Bounds bounds = NodeProperties::GetBounds(node);
+ Type* type = NodeProperties::GetType(node);
os_ << " type:";
- bounds.upper->PrintTo(os_);
- os_ << "..";
- bounds.lower->PrintTo(os_);
+ type->PrintTo(os_);
}
}
@@ -563,23 +373,21 @@
for (size_t i = 0; i < rpo->size(); i++) {
BasicBlock* current = (*rpo)[i];
Tag block_tag(this, "block");
- PrintBlockProperty("name", current->id());
+ PrintBlockProperty("name", current->rpo_number());
PrintIntProperty("from_bci", -1);
PrintIntProperty("to_bci", -1);
PrintIndent();
os_ << "predecessors";
- for (BasicBlock::Predecessors::iterator j = current->predecessors_begin();
- j != current->predecessors_end(); ++j) {
- os_ << " \"B" << (*j)->id() << "\"";
+ for (BasicBlock* predecessor : current->predecessors()) {
+ os_ << " \"B" << predecessor->rpo_number() << "\"";
}
os_ << "\n";
PrintIndent();
os_ << "successors";
- for (BasicBlock::Successors::iterator j = current->successors_begin();
- j != current->successors_end(); ++j) {
- os_ << " \"B" << (*j)->id() << "\"";
+ for (BasicBlock* successor : current->successors()) {
+ os_ << " \"B" << successor->rpo_number() << "\"";
}
os_ << "\n";
@@ -589,21 +397,24 @@
PrintIndent();
os_ << "flags\n";
- if (current->dominator() != NULL) {
- PrintBlockProperty("dominator", current->dominator()->id());
+ if (current->dominator() != nullptr) {
+ PrintBlockProperty("dominator", current->dominator()->rpo_number());
}
PrintIntProperty("loop_depth", current->loop_depth());
const InstructionBlock* instruction_block =
- instructions->InstructionBlockAt(current->GetRpoNumber());
+ instructions->InstructionBlockAt(
+ RpoNumber::FromInt(current->rpo_number()));
if (instruction_block->code_start() >= 0) {
int first_index = instruction_block->first_instruction_index();
int last_index = instruction_block->last_instruction_index();
- PrintIntProperty("first_lir_id", LifetimePosition::FromInstructionIndex(
- first_index).Value());
- PrintIntProperty("last_lir_id", LifetimePosition::FromInstructionIndex(
- last_index).Value());
+ PrintIntProperty(
+ "first_lir_id",
+ LifetimePosition::GapFromInstructionIndex(first_index).value());
+ PrintIntProperty("last_lir_id",
+ LifetimePosition::InstructionFromInstructionIndex(
+ last_index).value());
}
{
@@ -644,10 +455,9 @@
os_ << " ";
PrintType(node);
}
- if (positions != NULL) {
+ if (positions != nullptr) {
SourcePosition position = positions->GetSourcePosition(node);
- if (!position.IsUnknown()) {
- DCHECK(!position.IsInvalid());
+ if (position.IsKnown()) {
os_ << " pos:" << position.raw();
}
}
@@ -658,17 +468,16 @@
if (control != BasicBlock::kNone) {
PrintIndent();
os_ << "0 0 ";
- if (current->control_input() != NULL) {
+ if (current->control_input() != nullptr) {
PrintNode(current->control_input());
} else {
- os_ << -1 - current->id().ToInt() << " Goto";
+ os_ << -1 - current->rpo_number() << " Goto";
}
os_ << " ->";
- for (BasicBlock::Successors::iterator j = current->successors_begin();
- j != current->successors_end(); ++j) {
- os_ << " B" << (*j)->id();
+ for (BasicBlock* successor : current->successors()) {
+ os_ << " B" << successor->rpo_number();
}
- if (FLAG_trace_turbo_types && current->control_input() != NULL) {
+ if (FLAG_trace_turbo_types && current->control_input() != nullptr) {
os_ << " ";
PrintType(current->control_input());
}
@@ -676,13 +485,14 @@
}
}
- if (instructions != NULL) {
+ if (instructions != nullptr) {
Tag LIR_tag(this, "LIR");
for (int j = instruction_block->first_instruction_index();
j <= instruction_block->last_instruction_index(); j++) {
PrintIndent();
- PrintableInstruction printable = {RegisterConfiguration::ArchDefault(),
- instructions->InstructionAt(j)};
+ PrintableInstruction printable = {
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
+ instructions->InstructionAt(j)};
os_ << j << " " << printable << " <|@\n";
}
}
@@ -690,77 +500,80 @@
}
-void GraphC1Visualizer::PrintAllocator(const char* phase,
- const RegisterAllocator* allocator) {
+void GraphC1Visualizer::PrintLiveRanges(const char* phase,
+ const RegisterAllocationData* data) {
Tag tag(this, "intervals");
PrintStringProperty("name", phase);
- for (auto range : allocator->fixed_double_live_ranges()) {
- PrintLiveRange(range, "fixed");
+ for (auto range : data->fixed_double_live_ranges()) {
+ PrintLiveRangeChain(range, "fixed");
}
- for (auto range : allocator->fixed_live_ranges()) {
- PrintLiveRange(range, "fixed");
+ for (auto range : data->fixed_live_ranges()) {
+ PrintLiveRangeChain(range, "fixed");
}
- for (auto range : allocator->live_ranges()) {
- PrintLiveRange(range, "object");
+ for (auto range : data->live_ranges()) {
+ PrintLiveRangeChain(range, "object");
}
}
-void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type) {
- if (range != NULL && !range->IsEmpty()) {
+void GraphC1Visualizer::PrintLiveRangeChain(TopLevelLiveRange* range,
+ const char* type) {
+ if (range == nullptr || range->IsEmpty()) return;
+ int vreg = range->vreg();
+ for (LiveRange* child = range; child != nullptr; child = child->next()) {
+ PrintLiveRange(child, type, vreg);
+ }
+}
+
+
+void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
+ int vreg) {
+ if (range != nullptr && !range->IsEmpty()) {
PrintIndent();
- os_ << range->id() << " " << type;
+ os_ << vreg << ":" << range->relative_id() << " " << type;
if (range->HasRegisterAssigned()) {
- InstructionOperand* op = range->CreateAssignedOperand(zone());
- int assigned_reg = op->index();
- if (op->IsDoubleRegister()) {
- os_ << " \"" << DoubleRegister::AllocationIndexToString(assigned_reg)
+ AllocatedOperand op = AllocatedOperand::cast(range->GetAssignedOperand());
+ if (op.IsDoubleRegister()) {
+ DoubleRegister assigned_reg = op.GetDoubleRegister();
+ os_ << " \"" << assigned_reg.ToString() << "\"";
+ } else {
+ DCHECK(op.IsRegister());
+ Register assigned_reg = op.GetRegister();
+ os_ << " \"" << assigned_reg.ToString() << "\"";
+ }
+ } else if (range->spilled()) {
+ auto top = range->TopLevel();
+ int index = -1;
+ if (top->HasSpillRange()) {
+ index = kMaxInt; // This hasn't been set yet.
+ } else if (top->GetSpillOperand()->IsConstant()) {
+ os_ << " \"const(nostack):"
+ << ConstantOperand::cast(top->GetSpillOperand())->virtual_register()
<< "\"";
} else {
- DCHECK(op->IsRegister());
- os_ << " \"" << Register::AllocationIndexToString(assigned_reg) << "\"";
- }
- } else if (range->IsSpilled()) {
- int index = -1;
- if (range->TopLevel()->HasSpillRange()) {
- index = kMaxInt; // This hasn't been set yet.
- } else {
- index = range->TopLevel()->GetSpillOperand()->index();
- }
- if (range->TopLevel()->Kind() == DOUBLE_REGISTERS) {
- os_ << " \"double_stack:" << index << "\"";
- } else if (range->TopLevel()->Kind() == GENERAL_REGISTERS) {
- os_ << " \"stack:" << index << "\"";
- } else {
- os_ << " \"const(nostack):" << index << "\"";
+ index = AllocatedOperand::cast(top->GetSpillOperand())->index();
+ if (top->kind() == DOUBLE_REGISTERS) {
+ os_ << " \"double_stack:" << index << "\"";
+ } else if (top->kind() == GENERAL_REGISTERS) {
+ os_ << " \"stack:" << index << "\"";
+ }
}
}
- int parent_index = -1;
- if (range->IsChild()) {
- parent_index = range->parent()->id();
- } else {
- parent_index = range->id();
- }
- InstructionOperand* op = range->FirstHint();
- int hint_index = -1;
- if (op != NULL && op->IsUnallocated()) {
- hint_index = UnallocatedOperand::cast(op)->virtual_register();
- }
- os_ << " " << parent_index << " " << hint_index;
- UseInterval* cur_interval = range->first_interval();
- while (cur_interval != NULL && range->Covers(cur_interval->start())) {
- os_ << " [" << cur_interval->start().Value() << ", "
- << cur_interval->end().Value() << "[";
- cur_interval = cur_interval->next();
+
+ os_ << " " << vreg;
+ for (auto interval = range->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ os_ << " [" << interval->start().value() << ", "
+ << interval->end().value() << "[";
}
UsePosition* current_pos = range->first_pos();
- while (current_pos != NULL) {
+ while (current_pos != nullptr) {
if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) {
- os_ << " " << current_pos->pos().Value() << " M";
+ os_ << " " << current_pos->pos().value() << " M";
}
current_pos = current_pos->next();
}
@@ -771,23 +584,24 @@
std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
- Zone tmp_zone(ac.info_->isolate());
+ Zone tmp_zone;
GraphC1Visualizer(os, &tmp_zone).PrintCompilation(ac.info_);
return os;
}
std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
- Zone tmp_zone(ac.schedule_->zone()->isolate());
+ Zone tmp_zone;
GraphC1Visualizer(os, &tmp_zone)
.PrintSchedule(ac.phase_, ac.schedule_, ac.positions_, ac.instructions_);
return os;
}
-std::ostream& operator<<(std::ostream& os, const AsC1VAllocator& ac) {
- Zone tmp_zone(ac.allocator_->code()->zone()->isolate());
- GraphC1Visualizer(os, &tmp_zone).PrintAllocator(ac.phase_, ac.allocator_);
+std::ostream& operator<<(std::ostream& os,
+ const AsC1VRegisterAllocationData& ac) {
+ Zone tmp_zone;
+ GraphC1Visualizer(os, &tmp_zone).PrintLiveRanges(ac.phase_, ac.data_);
return os;
}
@@ -796,7 +610,7 @@
const int kVisited = 2;
std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
- Zone local_zone(ar.graph.zone()->isolate());
+ Zone local_zone;
ZoneVector<byte> state(ar.graph.NodeCount(), kUnvisited, &local_zone);
ZoneStack<Node*> stack(&local_zone);
@@ -816,7 +630,7 @@
if (pop) {
state[n->id()] = kVisited;
stack.pop();
- os << "#" << SafeId(n) << ":" << SafeMnemonic(n) << "(";
+ os << "#" << n->id() << ":" << *n->op() << "(";
int j = 0;
for (Node* const i : n->inputs()) {
if (j++ > 0) os << ", ";
@@ -827,6 +641,6 @@
}
return os;
}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/graph-visualizer.h b/src/compiler/graph-visualizer.h
index 3dd66ea..1a971a5 100644
--- a/src/compiler/graph-visualizer.h
+++ b/src/compiler/graph-visualizer.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_GRAPH_VISUALIZER_H_
#define V8_COMPILER_GRAPH_VISUALIZER_H_
+#include <stdio.h>
#include <iosfwd>
namespace v8 {
@@ -16,22 +17,17 @@
class Graph;
class InstructionSequence;
-class RegisterAllocator;
+class RegisterAllocationData;
class Schedule;
class SourcePositionTable;
-
-struct AsDOT {
- explicit AsDOT(const Graph& g) : graph(g) {}
- const Graph& graph;
-};
-
-std::ostream& operator<<(std::ostream& os, const AsDOT& ad);
-
+FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
+ const char* suffix, const char* mode);
struct AsJSON {
- explicit AsJSON(const Graph& g) : graph(g) {}
+ AsJSON(const Graph& g, SourcePositionTable* p) : graph(g), positions(p) {}
const Graph& graph;
+ const SourcePositionTable* positions;
};
std::ostream& operator<<(std::ostream& os, const AsJSON& ad);
@@ -52,8 +48,8 @@
struct AsC1V {
AsC1V(const char* phase, const Schedule* schedule,
- const SourcePositionTable* positions = NULL,
- const InstructionSequence* instructions = NULL)
+ const SourcePositionTable* positions = nullptr,
+ const InstructionSequence* instructions = nullptr)
: schedule_(schedule),
instructions_(instructions),
positions_(positions),
@@ -64,18 +60,18 @@
const char* phase_;
};
-struct AsC1VAllocator {
- explicit AsC1VAllocator(const char* phase,
- const RegisterAllocator* allocator = NULL)
- : phase_(phase), allocator_(allocator) {}
+struct AsC1VRegisterAllocationData {
+ explicit AsC1VRegisterAllocationData(
+ const char* phase, const RegisterAllocationData* data = nullptr)
+ : phase_(phase), data_(data) {}
const char* phase_;
- const RegisterAllocator* allocator_;
+ const RegisterAllocationData* data_;
};
-std::ostream& operator<<(std::ostream& os, const AsDOT& ad);
std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac);
std::ostream& operator<<(std::ostream& os, const AsC1V& ac);
-std::ostream& operator<<(std::ostream& os, const AsC1VAllocator& ac);
+std::ostream& operator<<(std::ostream& os,
+ const AsC1VRegisterAllocationData& ac);
} // namespace compiler
} // namespace internal
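A hedged sketch of how the visualizer helpers compose; info, graph, and
positions are assumed to be the pipeline's CompilationInfo, Graph, and
SourcePositionTable:

  FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
  if (json_file != nullptr) {
    OFStream json_of(json_file);
    // Streams {"nodes":[...],"edges":[...]} via the writers above.
    json_of << AsJSON(*graph, positions);
    fclose(json_file);
  }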
diff --git a/src/compiler/graph.cc b/src/compiler/graph.cc
index 995046b..3d4d6da 100644
--- a/src/compiler/graph.cc
+++ b/src/compiler/graph.cc
@@ -4,14 +4,12 @@
#include "src/compiler/graph.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-inl.h"
+#include <algorithm>
+
+#include "src/base/bits.h"
#include "src/compiler/node.h"
-#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/compiler/opcodes.h"
-#include "src/compiler/operator-properties.h"
+#include "src/compiler/verifier.h"
namespace v8 {
namespace internal {
@@ -19,30 +17,61 @@
Graph::Graph(Zone* zone)
: zone_(zone),
- start_(NULL),
- end_(NULL),
+ start_(nullptr),
+ end_(nullptr),
mark_max_(0),
next_node_id_(0),
decorators_(zone) {}
void Graph::Decorate(Node* node) {
- for (ZoneVector<GraphDecorator*>::iterator i = decorators_.begin();
- i != decorators_.end(); ++i) {
- (*i)->Decorate(node);
+ for (auto const decorator : decorators_) {
+ decorator->Decorate(node);
}
}
+void Graph::AddDecorator(GraphDecorator* decorator) {
+ decorators_.push_back(decorator);
+}
+
+
+void Graph::RemoveDecorator(GraphDecorator* decorator) {
+ auto const it = std::find(decorators_.begin(), decorators_.end(), decorator);
+ DCHECK(it != decorators_.end());
+ decorators_.erase(it);
+}
+
+
Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs,
bool incomplete) {
- DCHECK_LE(op->ValueInputCount(), input_count);
- Node* result = Node::New(this, input_count, inputs, incomplete);
- result->Initialize(op);
- if (!incomplete) {
- Decorate(result);
- }
- return result;
+ Node* node = NewNodeUnchecked(op, input_count, inputs, incomplete);
+ Verifier::VerifyNode(node);
+ return node;
+}
+
+
+Node* Graph::NewNodeUnchecked(const Operator* op, int input_count,
+ Node** inputs, bool incomplete) {
+ Node* const node =
+ Node::New(zone(), NextNodeId(), op, input_count, inputs, incomplete);
+ Decorate(node);
+ return node;
+}
+
+
+Node* Graph::CloneNode(const Node* node) {
+ DCHECK_NOT_NULL(node);
+ Node* const clone = Node::Clone(zone(), NextNodeId(), node);
+ Decorate(clone);
+ return clone;
+}
+
+
+NodeId Graph::NextNodeId() {
+ NodeId const id = next_node_id_;
+ CHECK(!base::bits::UnsignedAddOverflow32(id, 1, &next_node_id_));
+ return id;
}
} // namespace compiler
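NextNodeId() now CHECKs that the node id counter never wraps. A standalone
sketch of the same guard outside the V8 tree, using the GCC/Clang builtin in
place of base::bits::UnsignedAddOverflow32:

  #include <cstdint>
  #include <cstdlib>

  uint32_t NextId(uint32_t* next_id) {
    uint32_t const id = *next_id;
    uint32_t incremented;
    if (__builtin_add_overflow(id, 1u, &incremented)) std::abort();
    *next_id = incremented;
    return id;  // ids are handed out strictly once: 0, 1, 2, ...
  }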
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
index d619da2..b53c7fd 100644
--- a/src/compiler/graph.h
+++ b/src/compiler/graph.h
@@ -5,12 +5,8 @@
#ifndef V8_COMPILER_GRAPH_H_
#define V8_COMPILER_GRAPH_H_
-#include <map>
-#include <set>
-
-#include "src/compiler/node.h"
-#include "src/compiler/node-aux-data.h"
-#include "src/compiler/source-position.h"
+#include "src/zone.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -18,6 +14,19 @@
// Forward declarations.
class GraphDecorator;
+class Node;
+class Operator;
+
+
+// Marks are used during traversal of the graph to distinguish states of nodes.
+// Each node has a mark which is a monotonically increasing integer, and a
+// {NodeMarker} has a range of values that indicate states of a node.
+typedef uint32_t Mark;
+
+
+// NodeIds are identifying numbers for nodes that can be used to index auxiliary
+// out-of-line data associated with each node.
+typedef uint32_t NodeId;
class Graph : public ZoneObject {
@@ -25,6 +34,10 @@
explicit Graph(Zone* zone);
// Base implementation used by all factory methods.
+ Node* NewNodeUnchecked(const Operator* op, int input_count, Node** inputs,
+ bool incomplete = false);
+
+ // Factory that checks the input count.
Node* NewNode(const Operator* op, int input_count, Node** inputs,
bool incomplete = false);
@@ -60,9 +73,19 @@
Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7};
return NewNode(op, arraysize(nodes), nodes);
}
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6, Node* n7, Node* n8) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8};
+ return NewNode(op, arraysize(nodes), nodes);
+ }
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6, Node* n7, Node* n8, Node* n9) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9};
+ return NewNode(op, arraysize(nodes), nodes);
+ }
- template <class Visitor>
- inline void VisitNodeInputsFromEnd(Visitor* visitor);
+ // Clone the {node}, and assign a new node id to the copy.
+ Node* CloneNode(const Node* node);
Zone* zone() const { return zone_; }
Node* start() const { return start_; }
@@ -71,27 +94,18 @@
void SetStart(Node* start) { start_ = start; }
void SetEnd(Node* end) { end_ = end; }
- NodeId NextNodeID() { return next_node_id_++; }
- NodeId NodeCount() const { return next_node_id_; }
+ size_t NodeCount() const { return next_node_id_; }
void Decorate(Node* node);
-
- void AddDecorator(GraphDecorator* decorator) {
- decorators_.push_back(decorator);
- }
-
- void RemoveDecorator(GraphDecorator* decorator) {
- ZoneVector<GraphDecorator*>::iterator it =
- std::find(decorators_.begin(), decorators_.end(), decorator);
- DCHECK(it != decorators_.end());
- decorators_.erase(it, it + 1);
- }
+ void AddDecorator(GraphDecorator* decorator);
+ void RemoveDecorator(GraphDecorator* decorator);
private:
- template <typename State>
- friend class NodeMarker;
+ friend class NodeMarkerBase;
- Zone* zone_;
+ inline NodeId NextNodeId();
+
+ Zone* const zone_;
Node* start_;
Node* end_;
Mark mark_max_;
@@ -102,40 +116,6 @@
};
-// A NodeMarker uses monotonically increasing marks to assign local "states"
-// to nodes. Only one NodeMarker per graph is valid at a given time.
-template <typename State>
-class NodeMarker BASE_EMBEDDED {
- public:
- NodeMarker(Graph* graph, uint32_t num_states)
- : mark_min_(graph->mark_max_), mark_max_(graph->mark_max_ += num_states) {
- DCHECK(num_states > 0); // user error!
- DCHECK(mark_max_ > mark_min_); // check for wraparound.
- }
-
- State Get(Node* node) {
- Mark mark = node->mark();
- if (mark < mark_min_) {
- mark = mark_min_;
- node->set_mark(mark_min_);
- }
- DCHECK_LT(mark, mark_max_);
- return static_cast<State>(mark - mark_min_);
- }
-
- void Set(Node* node, State state) {
- Mark local = static_cast<Mark>(state);
- DCHECK(local < (mark_max_ - mark_min_));
- DCHECK_LT(node->mark(), mark_max_);
- node->set_mark(local + mark_min_);
- }
-
- private:
- Mark mark_min_;
- Mark mark_max_;
-};
-
-
// A graph decorator can be used to add behavior to the creation of nodes
// in a graph.
class GraphDecorator : public ZoneObject {
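The GraphDecorator body is cut off by the diff context above; a hedged sketch
of a client decorator, relying only on Decorate(Node*) and the Add/Remove
hooks declared in this header:

  class NodeCountDecorator final : public GraphDecorator {
   public:
    void Decorate(Node*) final { ++count_; }
    size_t count() const { return count_; }

   private:
    size_t count_ = 0;
  };

  // Usage: graph->AddDecorator(&counter) hooks every NewNode/CloneNode call;
  // graph->RemoveDecorator(&counter) unhooks it when the pass is done.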
diff --git a/src/compiler/greedy-allocator.cc b/src/compiler/greedy-allocator.cc
new file mode 100644
index 0000000..683b75d
--- /dev/null
+++ b/src/compiler/greedy-allocator.cc
@@ -0,0 +1,629 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/greedy-allocator.h"
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+ } while (false)
+
+
+const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;
+
+
+namespace {
+
+void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
+ int reg_id = range->assigned_register();
+ range->SetUseHints(reg_id);
+ if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
+ data->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg_id);
+ }
+}
+
+
+void UnsetOperands(LiveRange* range, RegisterAllocationData* data) {
+ range->UnsetUseHints();
+ if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
+ data->GetPhiMapValueFor(range->TopLevel())->UnsetAssignedRegister();
+ }
+}
+
+
+LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
+ LifetimePosition pos) {
+ DCHECK(range->Start() < pos && pos < range->End());
+ DCHECK(pos.IsStart() || pos.IsGapPosition() ||
+ (data->code()
+ ->GetInstructionBlock(pos.ToInstructionIndex())
+ ->last_instruction_index() != pos.ToInstructionIndex()));
+ LiveRange* result = range->SplitAt(pos, data->allocation_zone());
+ return result;
+}
+
+
+} // namespace
+
+
+AllocationCandidate AllocationScheduler::GetNext() {
+ DCHECK(!queue_.empty());
+ AllocationCandidate ret = queue_.top();
+ queue_.pop();
+ return ret;
+}
+
+
+void AllocationScheduler::Schedule(LiveRange* range) {
+ TRACE("Scheduling live range %d:%d.\n", range->TopLevel()->vreg(),
+ range->relative_id());
+ queue_.push(AllocationCandidate(range));
+}
+
+
+void AllocationScheduler::Schedule(LiveRangeGroup* group) {
+ queue_.push(AllocationCandidate(group));
+}
+
+GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
+ RegisterKind kind, Zone* local_zone)
+ : RegisterAllocator(data, kind),
+ local_zone_(local_zone),
+ allocations_(local_zone),
+ scheduler_(local_zone),
+ groups_(local_zone) {}
+
+
+void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
+ TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id),
+ range->TopLevel()->vreg(), range->relative_id());
+
+ DCHECK(!range->HasRegisterAssigned());
+
+ AllocateRegisterToRange(reg_id, range);
+
+ TRACE("Assigning %s to range %d%d.\n", RegisterName(reg_id),
+ range->TopLevel()->vreg(), range->relative_id());
+ range->set_assigned_register(reg_id);
+ UpdateOperands(range, data());
+}
+
+
+void GreedyAllocator::PreallocateFixedRanges() {
+ allocations_.resize(num_registers());
+ for (int i = 0; i < num_registers(); i++) {
+ allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
+ }
+
+ for (LiveRange* fixed_range : GetFixedRegisters()) {
+ if (fixed_range != nullptr) {
+ DCHECK_EQ(mode(), fixed_range->kind());
+ DCHECK(fixed_range->TopLevel()->IsFixed());
+
+ int reg_nr = fixed_range->assigned_register();
+ EnsureValidRangeWeight(fixed_range);
+ AllocateRegisterToRange(reg_nr, fixed_range);
+ }
+ }
+}
+
+
+void GreedyAllocator::GroupLiveRanges() {
+ CoalescedLiveRanges grouper(local_zone());
+ for (TopLevelLiveRange* range : data()->live_ranges()) {
+ grouper.clear();
+ // Skip splinters, because we do not want to optimize for them, and moves
+ // due to assigning them to different registers occur in deferred blocks.
+ if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) {
+ continue;
+ }
+
+ // A phi can't be a memory operand, so it couldn't have been split.
+ DCHECK(!range->spilled());
+
+ // Maybe this phi range is itself an input to another phi which was already
+ // processed.
+ LiveRangeGroup* latest_grp = range->group() != nullptr
+ ? range->group()
+ : new (local_zone())
+ LiveRangeGroup(local_zone());
+
+ // Populate the grouper.
+ if (range->group() == nullptr) {
+ grouper.AllocateRange(range);
+ } else {
+ for (LiveRange* member : range->group()->ranges()) {
+ grouper.AllocateRange(member);
+ }
+ }
+ for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) {
+      // Skip an output that is also an input, which may happen for loops.
+ if (j == range->vreg()) continue;
+
+ TopLevelLiveRange* other_top = data()->live_ranges()[j];
+
+ if (other_top->IsSplinter()) continue;
+ // If the other was a memory operand, it might have been split.
+ // So get the unsplit part.
+ LiveRange* other =
+ other_top->next() == nullptr ? other_top : other_top->next();
+
+ if (other->spilled()) continue;
+
+ LiveRangeGroup* other_group = other->group();
+ if (other_group != nullptr) {
+ bool can_merge = true;
+ for (LiveRange* member : other_group->ranges()) {
+ if (grouper.GetConflicts(member).Current() != nullptr) {
+ can_merge = false;
+ break;
+ }
+ }
+          // If no member conflicts with the current group, then, since the
+          // members don't conflict with each other either, we can merge them.
+ if (can_merge) {
+ latest_grp->ranges().insert(latest_grp->ranges().end(),
+ other_group->ranges().begin(),
+ other_group->ranges().end());
+ for (LiveRange* member : other_group->ranges()) {
+ grouper.AllocateRange(member);
+ member->set_group(latest_grp);
+ }
+          // Clear the other group, so we avoid scheduling it.
+ other_group->ranges().clear();
+ }
+ } else if (grouper.GetConflicts(other).Current() == nullptr) {
+ grouper.AllocateRange(other);
+ latest_grp->ranges().push_back(other);
+ other->set_group(latest_grp);
+ }
+ }
+
+ if (latest_grp->ranges().size() > 0 && range->group() == nullptr) {
+ latest_grp->ranges().push_back(range);
+ DCHECK(latest_grp->ranges().size() > 1);
+ groups().push_back(latest_grp);
+ range->set_group(latest_grp);
+ }
+ }
+}
+
+
+void GreedyAllocator::ScheduleAllocationCandidates() {
+ for (LiveRangeGroup* group : groups()) {
+ if (group->ranges().size() > 0) {
+ // We shouldn't have added single-range groups.
+ DCHECK(group->ranges().size() != 1);
+ scheduler().Schedule(group);
+ }
+ }
+ for (LiveRange* range : data()->live_ranges()) {
+ if (CanProcessRange(range)) {
+ for (LiveRange* child = range; child != nullptr; child = child->next()) {
+ if (!child->spilled() && child->group() == nullptr) {
+ scheduler().Schedule(child);
+ }
+ }
+ }
+ }
+}
+
+
+void GreedyAllocator::TryAllocateCandidate(
+ const AllocationCandidate& candidate) {
+ if (candidate.is_group()) {
+ TryAllocateGroup(candidate.group());
+ } else {
+ TryAllocateLiveRange(candidate.live_range());
+ }
+}
+
+
+void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) {
+ float group_weight = 0.0;
+ for (LiveRange* member : group->ranges()) {
+ EnsureValidRangeWeight(member);
+ group_weight = Max(group_weight, member->weight());
+ }
+
+ float eviction_weight = group_weight;
+ int eviction_reg = -1;
+ int free_reg = -1;
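+  // Look for a free register; failing that, remember the register whose
+  // heaviest conflict is lightest and still below the group's weight.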
+ for (int i = 0; i < num_allocatable_registers(); ++i) {
+ int reg = allocatable_register_code(i);
+ float weight = GetMaximumConflictingWeight(reg, group, group_weight);
+ if (weight == LiveRange::kInvalidWeight) {
+ free_reg = reg;
+ break;
+ }
+ if (weight < eviction_weight) {
+ eviction_weight = weight;
+ eviction_reg = reg;
+ }
+ }
+ if (eviction_reg < 0 && free_reg < 0) {
+ for (LiveRange* member : group->ranges()) {
+ scheduler().Schedule(member);
+ }
+ return;
+ }
+ if (free_reg < 0) {
+ DCHECK(eviction_reg >= 0);
+ for (LiveRange* member : group->ranges()) {
+ EvictAndRescheduleConflicts(eviction_reg, member);
+ }
+ free_reg = eviction_reg;
+ }
+
+ DCHECK(free_reg >= 0);
+ for (LiveRange* member : group->ranges()) {
+ AssignRangeToRegister(free_reg, member);
+ }
+}
+
+
+void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
+ // TODO(mtrofin): once we introduce groups, we'll want to first try and
+ // allocate at the preferred register.
+ TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(),
+ range->relative_id());
+ int free_reg = -1;
+ int evictable_reg = -1;
+ int hinted_reg = -1;
+
+ EnsureValidRangeWeight(range);
+ float competing_weight = range->weight();
+ DCHECK(competing_weight != LiveRange::kInvalidWeight);
+
+ // Can we allocate at the hinted register?
+ if (range->FirstHintPosition(&hinted_reg) != nullptr) {
+ DCHECK(hinted_reg >= 0);
+ float max_conflict_weight =
+ GetMaximumConflictingWeight(hinted_reg, range, competing_weight);
+ if (max_conflict_weight == LiveRange::kInvalidWeight) {
+ free_reg = hinted_reg;
+ } else if (max_conflict_weight < range->weight()) {
+ evictable_reg = hinted_reg;
+ }
+ }
+
+ if (free_reg < 0 && evictable_reg < 0) {
+ // There was no hinted reg, or we cannot allocate there.
+ float smallest_weight = LiveRange::kMaxWeight;
+
+ // Seek either the first free register, or, from the set of registers
+ // where the maximum conflict is lower than the candidate's weight, the one
+ // with the smallest such weight.
+ for (int i = 0; i < num_allocatable_registers(); i++) {
+ int reg = allocatable_register_code(i);
+ // Skip unnecessarily re-visiting the hinted register, if any.
+ if (reg == hinted_reg) continue;
+ float max_conflict_weight =
+ GetMaximumConflictingWeight(reg, range, competing_weight);
+ if (max_conflict_weight == LiveRange::kInvalidWeight) {
+ free_reg = reg;
+ break;
+ }
+ if (max_conflict_weight < range->weight() &&
+ max_conflict_weight < smallest_weight) {
+ smallest_weight = max_conflict_weight;
+ evictable_reg = reg;
+ }
+ }
+ }
+
+ // We have a free register, so we use it.
+ if (free_reg >= 0) {
+ TRACE("Found free register %s for live range %d:%d.\n",
+ RegisterName(free_reg), range->TopLevel()->vreg(),
+ range->relative_id());
+ AssignRangeToRegister(free_reg, range);
+ return;
+ }
+
+ // We found a register to perform evictions, so we evict and allocate our
+ // candidate.
+ if (evictable_reg >= 0) {
+ TRACE("Found evictable register %s for live range %d:%d.\n",
+          RegisterName(evictable_reg), range->TopLevel()->vreg(),
+ range->relative_id());
+ EvictAndRescheduleConflicts(evictable_reg, range);
+ AssignRangeToRegister(evictable_reg, range);
+ return;
+ }
+
+ // The range needs to be split or spilled.
+ SplitOrSpillBlockedRange(range);
+}
+
+
+void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
+ const LiveRange* range) {
+ auto conflicts = current_allocations(reg_id)->GetConflicts(range);
+ for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
+ conflict = conflicts.RemoveCurrentAndGetNext()) {
+ DCHECK(conflict->HasRegisterAssigned());
+ CHECK(!conflict->TopLevel()->IsFixed());
+ conflict->UnsetAssignedRegister();
+ UnsetOperands(conflict, data());
+ UpdateWeightAtEviction(conflict);
+ scheduler().Schedule(conflict);
+    TRACE("Evicted range %d:%d.\n", conflict->TopLevel()->vreg(),
+ conflict->relative_id());
+ }
+}
+
+
+void GreedyAllocator::AllocateRegisters() {
+ CHECK(scheduler().empty());
+ CHECK(allocations_.empty());
+
+ TRACE("Begin allocating function %s with the Greedy Allocator\n",
+ data()->debug_name());
+
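+  // Phases: split ranges defined by memory operands, group phi-related
+  // ranges, schedule candidates, insert the fixed ranges, then allocate
+  // candidates until the scheduler drains.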
+ SplitAndSpillRangesDefinedByMemoryOperand(true);
+ GroupLiveRanges();
+ ScheduleAllocationCandidates();
+ PreallocateFixedRanges();
+ while (!scheduler().empty()) {
+ AllocationCandidate candidate = scheduler().GetNext();
+ TryAllocateCandidate(candidate);
+ }
+
+ for (size_t i = 0; i < allocations_.size(); ++i) {
+ if (!allocations_[i]->empty()) {
+ data()->MarkAllocated(mode(), static_cast<int>(i));
+ }
+ }
+ allocations_.clear();
+
+ TryReuseSpillRangesForGroups();
+
+ TRACE("End allocating function %s with the Greedy Allocator\n",
+ data()->debug_name());
+}
+
+
+void GreedyAllocator::TryReuseSpillRangesForGroups() {
+ for (TopLevelLiveRange* top : data()->live_ranges()) {
+ if (!CanProcessRange(top) || !top->is_phi() || top->group() == nullptr) {
+ continue;
+ }
+
+ SpillRange* spill_range = nullptr;
+ for (LiveRange* member : top->group()->ranges()) {
+ if (!member->TopLevel()->HasSpillRange()) continue;
+ SpillRange* member_range = member->TopLevel()->GetSpillRange();
+ if (spill_range == nullptr) {
+ spill_range = member_range;
+ } else {
+ // This may not always succeed, because we group non-conflicting ranges
+ // that may have been splintered, and the splinters may cause conflicts
+ // in the spill ranges.
+ // TODO(mtrofin): should the splinters own their own spill ranges?
+ spill_range->TryMerge(member_range);
+ }
+ }
+ }
+}
+
+
+float GreedyAllocator::GetMaximumConflictingWeight(
+ unsigned reg_id, const LiveRange* range, float competing_weight) const {
+ float ret = LiveRange::kInvalidWeight;
+
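+  // Return early with kMaxWeight as soon as a conflict at least as heavy as
+  // the competing weight is found, since eviction would not help there.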
+ auto conflicts = current_allocations(reg_id)->GetConflicts(range);
+ for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
+ conflict = conflicts.GetNext()) {
+ DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight);
+ if (competing_weight <= conflict->weight()) return LiveRange::kMaxWeight;
+ ret = Max(ret, conflict->weight());
+ DCHECK(ret < LiveRange::kMaxWeight);
+ }
+
+ return ret;
+}
+
+
+float GreedyAllocator::GetMaximumConflictingWeight(unsigned reg_id,
+ const LiveRangeGroup* group,
+ float group_weight) const {
+ float ret = LiveRange::kInvalidWeight;
+
+ for (LiveRange* member : group->ranges()) {
+ float member_conflict_weight =
+ GetMaximumConflictingWeight(reg_id, member, group_weight);
+ if (member_conflict_weight == LiveRange::kMaxWeight) {
+ return LiveRange::kMaxWeight;
+ }
+ if (member_conflict_weight > group_weight) return LiveRange::kMaxWeight;
+ ret = Max(member_conflict_weight, ret);
+ }
+
+ return ret;
+}
+
+
+void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
+ // The live range weight will be invalidated when ranges are created or split.
+ // Otherwise, it is consistently updated when the range is allocated or
+ // unallocated.
+ if (range->weight() != LiveRange::kInvalidWeight) return;
+
+ if (range->TopLevel()->IsFixed()) {
+ range->set_weight(LiveRange::kMaxWeight);
+ return;
+ }
+ if (!IsProgressPossible(range)) {
+ range->set_weight(LiveRange::kMaxWeight);
+ return;
+ }
+
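+  // Weight is the density of register uses over the size of the range.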
+ float use_count = 0.0;
+ for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
+ ++use_count;
+ }
+ range->set_weight(use_count / static_cast<float>(range->GetSize()));
+}
+
+
+void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
+ LifetimePosition start = range->Start();
+ CHECK(range->CanBeSpilled(start));
+
+ DCHECK(range->NextRegisterPosition(start) == nullptr);
+ Spill(range);
+}
+
+
+LiveRange* GreedyAllocator::GetRemainderAfterSplittingAroundFirstCall(
+ LiveRange* range) {
+ LiveRange* ret = range;
+ for (UseInterval* interval = range->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ LifetimePosition start = interval->start();
+ LifetimePosition end = interval->end();
+    // If the interval starts at an instruction end, then the first full
+    // instruction in the interval is the next one.
+ int first_full_instruction = (start.IsGapPosition() || start.IsStart())
+ ? start.ToInstructionIndex()
+ : start.ToInstructionIndex() + 1;
+    // If the interval ends in a gap or at an instruction start, then the last
+    // full instruction is the previous one.
+ int last_full_instruction = (end.IsGapPosition() || end.IsStart())
+ ? end.ToInstructionIndex() - 1
+ : end.ToInstructionIndex();
+
+ for (int instruction_index = first_full_instruction;
+ instruction_index <= last_full_instruction; ++instruction_index) {
+ if (!code()->InstructionAt(instruction_index)->IsCall()) continue;
+
+ LifetimePosition before =
+ GetSplitPositionForInstruction(range, instruction_index);
+ LiveRange* second_part =
+ before.IsValid() ? Split(range, data(), before) : range;
+
+ if (range != second_part) scheduler().Schedule(range);
+
+ LifetimePosition after =
+ FindSplitPositionAfterCall(second_part, instruction_index);
+
+ if (after.IsValid()) {
+ ret = Split(second_part, data(), after);
+ } else {
+ ret = nullptr;
+ }
+ Spill(second_part);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+
+bool GreedyAllocator::TrySplitAroundCalls(LiveRange* range) {
+ bool modified = false;
+
+ while (range != nullptr) {
+ LiveRange* remainder = GetRemainderAfterSplittingAroundFirstCall(range);
+ // If we performed no modification, we're done.
+ if (remainder == range) {
+ break;
+ }
+ // We performed a modification.
+ modified = true;
+ range = remainder;
+ }
+ // If we have a remainder and we made modifications, it means the remainder
+ // has no calls and we should schedule it for further processing. If we made
+ // no modifications, we will just return false, because we want the algorithm
+ // to make progress by trying some other heuristic.
+ if (modified && range != nullptr) {
+ DCHECK(!range->spilled());
+ DCHECK(!range->HasRegisterAssigned());
+ scheduler().Schedule(range);
+ }
+ return modified;
+}
+
+
+LifetimePosition GreedyAllocator::FindSplitPositionAfterCall(
+ const LiveRange* range, int call_index) {
+ LifetimePosition after_call =
+ Max(range->Start(),
+ LifetimePosition::GapFromInstructionIndex(call_index + 1));
+ UsePosition* next_use = range->NextRegisterPosition(after_call);
+ if (!next_use) return LifetimePosition::Invalid();
+
+ LifetimePosition split_pos = FindOptimalSplitPos(after_call, next_use->pos());
+ split_pos =
+ GetSplitPositionForInstruction(range, split_pos.ToInstructionIndex());
+ return split_pos;
+}
+
+
+LifetimePosition GreedyAllocator::FindSplitPositionBeforeLoops(
+ LiveRange* range) {
+ LifetimePosition end = range->End();
+ if (end.ToInstructionIndex() >= code()->LastInstructionIndex()) {
+ end =
+ LifetimePosition::GapFromInstructionIndex(end.ToInstructionIndex() - 1);
+ }
+ LifetimePosition pos = FindOptimalSplitPos(range->Start(), end);
+ pos = GetSplitPositionForInstruction(range, pos.ToInstructionIndex());
+ return pos;
+}
+
+
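+// Heuristic cascade: try splitting around calls first, then before loops,
+// then at a last-resort position; spill only if no valid split exists.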
+void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
+ if (TrySplitAroundCalls(range)) return;
+
+ LifetimePosition pos = FindSplitPositionBeforeLoops(range);
+
+ if (!pos.IsValid()) pos = GetLastResortSplitPosition(range);
+ if (pos.IsValid()) {
+ LiveRange* tail = Split(range, data(), pos);
+ DCHECK(tail != range);
+ scheduler().Schedule(tail);
+ scheduler().Schedule(range);
+ return;
+ }
+ SpillRangeAsLastResort(range);
+}
+
+
+// Basic heuristic for advancing the algorithm, if any other splitting heuristic
+// failed.
+LifetimePosition GreedyAllocator::GetLastResortSplitPosition(
+ const LiveRange* range) {
+ LifetimePosition previous = range->Start();
+ for (UsePosition *pos = range->NextRegisterPosition(previous); pos != nullptr;
+ previous = previous.NextFullStart(),
+ pos = range->NextRegisterPosition(previous)) {
+ LifetimePosition optimal = FindOptimalSplitPos(previous, pos->pos());
+ LifetimePosition before =
+ GetSplitPositionForInstruction(range, optimal.ToInstructionIndex());
+ if (before.IsValid()) return before;
+ LifetimePosition after = GetSplitPositionForInstruction(
+ range, pos->pos().ToInstructionIndex() + 1);
+ if (after.IsValid()) return after;
+ }
+ return LifetimePosition::Invalid();
+}
+
+
+bool GreedyAllocator::IsProgressPossible(const LiveRange* range) {
+ return range->CanBeSpilled(range->Start()) ||
+ GetLastResortSplitPosition(range).IsValid();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/greedy-allocator.h b/src/compiler/greedy-allocator.h
new file mode 100644
index 0000000..b61ba42
--- /dev/null
+++ b/src/compiler/greedy-allocator.h
@@ -0,0 +1,199 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_GREEDY_ALLOCATOR_H_
+#define V8_GREEDY_ALLOCATOR_H_
+
+#include "src/compiler/coalesced-live-ranges.h"
+#include "src/compiler/register-allocator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// The unit of allocation scheduling: either a single LiveRange or a group of
+// LiveRanges. It has to be comparable.
+class AllocationCandidate {
+ public:
+ explicit AllocationCandidate(LiveRange* range)
+ : is_group_(false), size_(range->GetSize()) {
+ candidate_.range_ = range;
+ }
+
+ explicit AllocationCandidate(LiveRangeGroup* ranges)
+ : is_group_(true), size_(CalculateGroupSize(ranges)) {
+ candidate_.group_ = ranges;
+ }
+
+ // Strict ordering operators
+ bool operator<(const AllocationCandidate& other) const {
+ return size() < other.size();
+ }
+
+ bool operator>(const AllocationCandidate& other) const {
+ return size() > other.size();
+ }
+
+ bool is_group() const { return is_group_; }
+ LiveRange* live_range() const { return candidate_.range_; }
+ LiveRangeGroup* group() const { return candidate_.group_; }
+
+ private:
+ unsigned CalculateGroupSize(LiveRangeGroup* group) {
+ unsigned ret = 0;
+ for (LiveRange* range : group->ranges()) {
+ ret += range->GetSize();
+ }
+ return ret;
+ }
+
+ unsigned size() const { return size_; }
+ bool is_group_;
+ unsigned size_;
+ union {
+ LiveRange* range_;
+ LiveRangeGroup* group_;
+ } candidate_;
+};
+
+
+// Schedule processing (allocating) of AllocationCandidates.
+class AllocationScheduler final : ZoneObject {
+ public:
+ explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
+ void Schedule(LiveRange* range);
+ void Schedule(LiveRangeGroup* group);
+ AllocationCandidate GetNext();
+ bool empty() const { return queue_.empty(); }
+
+ private:
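+  // Assuming ZonePriorityQueue is a max-heap (as with std::priority_queue),
+  // larger candidates are dequeued first by GetNext().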
+ typedef ZonePriorityQueue<AllocationCandidate> ScheduleQueue;
+ ScheduleQueue queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationScheduler);
+};
+
+
+// A variant of the LLVM Greedy Register Allocator. See
+// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
+class GreedyAllocator final : public RegisterAllocator {
+ public:
+ explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
+ Zone* local_zone);
+
+ void AllocateRegisters();
+
+ private:
+ static const float kAllocatedRangeMultiplier;
+
+ static void UpdateWeightAtAllocation(LiveRange* range) {
+ DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
+ range->set_weight(range->weight() * kAllocatedRangeMultiplier);
+ }
+
+
+ static void UpdateWeightAtEviction(LiveRange* range) {
+ DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
+ range->set_weight(range->weight() / kAllocatedRangeMultiplier);
+ }
+
+ AllocationScheduler& scheduler() { return scheduler_; }
+ CoalescedLiveRanges* current_allocations(unsigned i) {
+ return allocations_[i];
+ }
+
+ CoalescedLiveRanges* current_allocations(unsigned i) const {
+ return allocations_[i];
+ }
+
+ Zone* local_zone() const { return local_zone_; }
+ ZoneVector<LiveRangeGroup*>& groups() { return groups_; }
+ const ZoneVector<LiveRangeGroup*>& groups() const { return groups_; }
+
+ // Insert fixed ranges.
+ void PreallocateFixedRanges();
+
+ void GroupLiveRanges();
+
+ // Schedule unassigned live ranges for allocation.
+ void ScheduleAllocationCandidates();
+
+ void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
+ UpdateWeightAtAllocation(range);
+ current_allocations(reg_id)->AllocateRange(range);
+ }
+ // Evict and reschedule conflicts of a given range, at a given register.
+ void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);
+
+ void TryAllocateCandidate(const AllocationCandidate& candidate);
+ void TryAllocateLiveRange(LiveRange* range);
+ void TryAllocateGroup(LiveRangeGroup* group);
+
+ // Calculate the weight of a candidate for allocation.
+ void EnsureValidRangeWeight(LiveRange* range);
+
+ // Calculate the new weight of a range that is about to be allocated.
+ float GetAllocatedRangeWeight(float candidate_weight);
+
+ // Returns kInvalidWeight if there are no conflicts, or the largest weight of
+ // a range conflicting with the given range, at the given register.
+ float GetMaximumConflictingWeight(unsigned reg_id, const LiveRange* range,
+ float competing_weight) const;
+
+ // Returns kInvalidWeight if there are no conflicts, or the largest weight of
+ // a range conflicting with the given range, at the given register.
+ float GetMaximumConflictingWeight(unsigned reg_id,
+ const LiveRangeGroup* group,
+ float group_weight) const;
+
+ // This is the extension point for splitting heuristics.
+ void SplitOrSpillBlockedRange(LiveRange* range);
+
+  // Find a good position at which to refill the value, after a range was
+  // spilled around a call.
+ LifetimePosition FindSplitPositionAfterCall(const LiveRange* range,
+ int call_index);
+ // Split a range around all calls it passes over. Returns true if any changes
+ // were made, or false if no calls were found.
+ bool TrySplitAroundCalls(LiveRange* range);
+
+  // Find a split position at the outermost loop.
+ LifetimePosition FindSplitPositionBeforeLoops(LiveRange* range);
+
+ // Finds the first call instruction in the path of this range. Splits before
+ // and requeues that segment (if any), spills the section over the call, and
+  // returns the section after the call. The return value is:
+ // - same range, if no call was found
+ // - nullptr, if the range finished at the call and there's no "after the
+ // call" portion.
+ // - the portion after the call.
+ LiveRange* GetRemainderAfterSplittingAroundFirstCall(LiveRange* range);
+
+  // While we attempt to merge spill ranges later on in the allocation
+  // pipeline, we want to ensure group members get merged here. Waiting until
+  // later may hinder mergeability, since the pipeline's naive merger may
+  // create conflicts between the spill ranges of group members.
+ void TryReuseSpillRangesForGroups();
+
+ LifetimePosition GetLastResortSplitPosition(const LiveRange* range);
+
+ bool IsProgressPossible(const LiveRange* range);
+
+  // Necessary heuristic: spill when all else has failed.
+ void SpillRangeAsLastResort(LiveRange* range);
+
+ void AssignRangeToRegister(int reg_id, LiveRange* range);
+
+ Zone* local_zone_;
+ ZoneVector<CoalescedLiveRanges*> allocations_;
+ AllocationScheduler scheduler_;
+ ZoneVector<LiveRangeGroup*> groups_;
+
+ DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+#endif // V8_GREEDY_ALLOCATOR_H_
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index 55f7426..f63bc22 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -4,13 +4,14 @@
#include "src/compiler/code-generator.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/osr.h"
#include "src/ia32/assembler-ia32.h"
+#include "src/ia32/frames-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -19,15 +20,20 @@
#define __ masm()->
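+// Note: xmm0 is reserved as a fixed scratch register throughout this file.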
+#define kScratchDoubleReg xmm0
+
+
// Adds IA-32 specific methods for decoding operands.
class IA32OperandConverter : public InstructionOperandConverter {
public:
IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+ Operand InputOperand(size_t index, int extra = 0) {
+ return ToOperand(instr_->InputAt(index), extra);
+ }
- Immediate InputImmediate(int index) {
+ Immediate InputImmediate(size_t index) {
return ToImmediate(instr_->InputAt(index));
}
@@ -42,8 +48,15 @@
return Operand(ToDoubleRegister(op));
}
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
+ return Operand(offset.from_stack_pointer() ? esp : ebp,
+ offset.offset() + extra);
+ }
+
+ Operand ToMaterializableOperand(int materializable_offset) {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ Frame::FPOffsetToSlot(materializable_offset));
return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
}
@@ -76,8 +89,8 @@
return Immediate(-1);
}
- static int NextOffset(int* offset) {
- int i = *offset;
+ static size_t NextOffset(size_t* offset) {
+ size_t i = *offset;
(*offset)++;
return i;
}
@@ -92,7 +105,7 @@
return static_cast<ScaleFactor>(scale);
}
- Operand MemoryOperand(int* offset) {
+ Operand MemoryOperand(size_t* offset) {
AddressingMode mode = AddressingModeField::decode(instr_->opcode());
switch (mode) {
case kMode_MR: {
@@ -155,7 +168,7 @@
return Operand(no_reg, 0);
}
- Operand MemoryOperand(int first_input = 0) {
+ Operand MemoryOperand(size_t first_input = 0) {
return MemoryOperand(&first_input);
}
};
@@ -163,42 +176,42 @@
namespace {
-bool HasImmediateInput(Instruction* instr, int index) {
+bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
-class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+class OutOfLineLoadInteger final : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL { __ xor_(result_, result_); }
+ void Generate() final { __ xor_(result_, result_); }
private:
Register const result_;
};
-class OutOfLineLoadFloat FINAL : public OutOfLineCode {
+class OutOfLineLoadFloat final : public OutOfLineCode {
public:
OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL { __ pcmpeqd(result_, result_); }
+ void Generate() final { __ pcmpeqd(result_, result_); }
private:
XMMRegister const result_;
};
-class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
+class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
XMMRegister input)
: OutOfLineCode(gen), result_(result), input_(input) {}
- void Generate() FINAL {
+ void Generate() final {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(MemOperand(esp, 0), input_);
__ SlowTruncateToI(result_, esp, 0);
@@ -210,6 +223,46 @@
XMMRegister const input_;
};
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ operand_(operand),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
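+    // Skip the barrier when the value is a Smi or lives on a page for which
+    // incoming pointers are not interesting; the mode_ checks elide tests
+    // the compiler already proved unnecessary.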
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ lea(scratch1_, operand_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Operand const operand_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
} // namespace
@@ -279,6 +332,28 @@
} while (false)
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ add(esp, Immediate(sp_slot_delta * kPointerSize));
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ mov(ebp, MemOperand(ebp, 0));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
+
+
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
IA32OperandConverter i(this, instr);
@@ -291,9 +366,25 @@
__ call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
- __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+ __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(reg);
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallCodeObject: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ if (HasImmediateInput(instr, 0)) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ jmp(code, RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(reg);
+ }
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -305,21 +396,81 @@
__ Assert(equal, kWrongFunctionContext);
}
__ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
+ __ Assert(equal, kWrongFunctionContext);
+ }
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ break;
+ }
case kArchRet:
AssembleReturn();
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), esp);
break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), ebp);
+ break;
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
@@ -330,6 +481,24 @@
__ bind(ool->exit());
break;
}
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ Register value = i.InputRegister(index);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
+ scratch0, scratch1, mode);
+ __ mov(operand, value);
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ not_zero, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kIA32Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
@@ -434,6 +603,63 @@
__ ror_cl(i.OutputOperand());
}
break;
+ case kIA32Lzcnt:
+ __ Lzcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kIA32Tzcnt:
+ __ Tzcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kIA32Popcnt:
+ __ Popcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kSSEFloat32Cmp:
+ __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
+ case kSSEFloat32Add:
+ __ addss(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
+ case kSSEFloat32Sub:
+ __ subss(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
+ case kSSEFloat32Mul:
+ __ mulss(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
+ case kSSEFloat32Div:
+ __ divss(i.InputDoubleRegister(0), i.InputOperand(1));
+ // Don't delete this mov. It may improve performance on some CPUs,
+ // when there is a (v)mulss depending on the result.
+ __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ break;
+ case kSSEFloat32Max:
+ __ maxss(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
+ case kSSEFloat32Min:
+ __ minss(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
+ case kSSEFloat32Sqrt:
+ __ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
+ break;
+ case kSSEFloat32Abs: {
+ // TODO(bmeurer): Use 128-bit constants.
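+      // All-ones shifted right by 33 yields 0x7fffffff in the low word;
+      // ANDing clears the float32 sign bit.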
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 33);
+ __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kSSEFloat32Neg: {
+ // TODO(bmeurer): Use 128-bit constants.
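+      // All-ones shifted left by 31 yields 0x80000000 in the low word;
+      // XORing flips the float32 sign bit.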
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 31);
+ __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kSSEFloat32Round: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ break;
+ }
case kSSEFloat64Cmp:
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
@@ -448,6 +674,15 @@
break;
case kSSEFloat64Div:
__ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ // Don't delete this mov. It may improve performance on some CPUs,
+ // when there is a (v)mulsd depending on the result.
+ __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ break;
+ case kSSEFloat64Max:
+ __ maxsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
+ case kSSEFloat64Min:
+ __ minsd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
case kSSEFloat64Mod: {
// TODO(dcarney): alignment is wrong.
@@ -474,41 +709,43 @@
__ add(esp, Immediate(kDoubleSize));
break;
}
+ case kSSEFloat64Abs: {
+ // TODO(bmeurer): Use 128-bit constants.
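+      // All-ones shifted right by 1 yields 0x7fffffffffffffff; ANDing clears
+      // the float64 sign bit.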
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 1);
+ __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kSSEFloat64Neg: {
+ // TODO(bmeurer): Use 128-bit constants.
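+      // All-ones shifted left by 63 yields 0x8000000000000000; XORing flips
+      // the float64 sign bit.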
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 63);
+ __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
case kSSEFloat64Sqrt:
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat64Floor: {
+ case kSSEFloat64Round: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundDown);
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
- case kSSEFloat64Ceil: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundUp);
- break;
- }
- case kSSEFloat64RoundTruncate: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundToZero);
- break;
- }
- case kSSECvtss2sd:
+ case kSSEFloat32ToFloat64:
__ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSECvtsd2ss:
+ case kSSEFloat64ToFloat32:
__ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEFloat64ToInt32:
__ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
break;
case kSSEFloat64ToUint32: {
- XMMRegister scratch = xmm0;
- __ Move(scratch, -2147483648.0);
- __ addsd(scratch, i.InputOperand(0));
- __ cvttsd2si(i.OutputRegister(), scratch);
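+    // Bias by -2^31 so the full uint32 range fits in int32, convert with
+    // cvttsd2si, then undo the bias with an integer add.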
+ __ Move(kScratchDoubleReg, -2147483648.0);
+ __ addsd(kScratchDoubleReg, i.InputOperand(0));
+ __ cvttsd2si(i.OutputRegister(), kScratchDoubleReg);
__ add(i.OutputRegister(), Immediate(0x80000000));
break;
}
@@ -518,6 +755,68 @@
case kSSEUint32ToFloat64:
__ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
break;
+ case kSSEFloat64ExtractLowWord32:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ mov(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kSSEFloat64ExtractHighWord32:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
+ } else {
+ __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
+ }
+ break;
+ case kSSEFloat64InsertLowWord32:
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
+ break;
+ case kSSEFloat64InsertHighWord32:
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
+ break;
+ case kSSEFloat64LoadLowWord32:
+ __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
+ break;
+ case kAVXFloat32Add: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kAVXFloat32Sub: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vsubss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kAVXFloat32Mul: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vmulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kAVXFloat32Div: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vdivss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ // Don't delete this mov. It may improve performance on some CPUs,
+ // when there is a (v)mulss depending on the result.
+ __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ break;
+ }
+ case kAVXFloat32Max: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vmaxss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kAVXFloat32Min: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vminss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
case kAVXFloat64Add: {
CpuFeatureScope avx_scope(masm(), AVX);
__ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -540,6 +839,53 @@
CpuFeatureScope avx_scope(masm(), AVX);
__ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
+ // Don't delete this mov. It may improve performance on some CPUs,
+ // when there is a (v)mulsd depending on the result.
+ __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ break;
+ }
+ case kAVXFloat64Max: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vmaxsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kAVXFloat64Min: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vminsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kAVXFloat32Abs: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 33);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ break;
+ }
+ case kAVXFloat32Neg: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 31);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ break;
+ }
+ case kAVXFloat64Abs: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 1);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ break;
+ }
+ case kAVXFloat64Neg: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 63);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
case kIA32Movsxbl:
@@ -549,7 +895,7 @@
__ movzx_b(i.OutputRegister(), i.MemoryOperand());
break;
case kIA32Movb: {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov_b(operand, i.InputInt8(index));
@@ -565,7 +911,7 @@
__ movzx_w(i.OutputRegister(), i.MemoryOperand());
break;
case kIA32Movw: {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov_w(operand, i.InputInt16(index));
@@ -578,7 +924,7 @@
if (instr->HasOutput()) {
__ mov(i.OutputRegister(), i.MemoryOperand());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov(operand, i.InputImmediate(index));
@@ -591,7 +937,7 @@
if (instr->HasOutput()) {
__ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ movsd(operand, i.InputDoubleRegister(index));
}
@@ -600,11 +946,25 @@
if (instr->HasOutput()) {
__ movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ movss(operand, i.InputDoubleRegister(index));
}
break;
+ case kIA32BitcastFI:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ mov(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kIA32BitcastIF:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
case kIA32Lea: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation
@@ -640,22 +1000,60 @@
}
break;
}
- case kIA32Push:
- if (HasImmediateInput(instr, 0)) {
- __ push(i.InputImmediate(0));
+ case kIA32PushFloat32:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movss(Operand(esp, 0), i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else if (HasImmediateInput(instr, 0)) {
+ __ Move(kScratchDoubleReg, i.InputDouble(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movss(Operand(esp, 0), kScratchDoubleReg);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
- __ push(i.InputOperand(0));
+ __ movsd(kScratchDoubleReg, i.InputOperand(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movss(Operand(esp, 0), kScratchDoubleReg);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
}
break;
- case kIA32StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ mov(Operand(object, index, times_1, 0), value);
- __ lea(index, Operand(object, index, times_1, 0));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- __ RecordWrite(object, index, value, mode);
+ case kIA32PushFloat64:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else if (HasImmediateInput(instr, 0)) {
+ __ Move(kScratchDoubleReg, i.InputDouble(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movsd(Operand(esp, 0), kScratchDoubleReg);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ __ movsd(kScratchDoubleReg, i.InputOperand(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movsd(Operand(esp, 0), kScratchDoubleReg);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ }
+ break;
+ case kIA32Push:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else if (HasImmediateInput(instr, 0)) {
+ __ push(i.InputImmediate(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ } else {
+ __ push(i.InputOperand(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
+ break;
+ case kIA32Poke: {
+ int const slot = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
+ } else {
+ __ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
+ }
break;
}
case kCheckedLoadInt8:
@@ -694,8 +1092,18 @@
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
break;
+ case kIA32StackCheck: {
+ ExternalReference const stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ break;
+ }
+ case kCheckedLoadWord64:
+ case kCheckedStoreWord64:
+ UNREACHABLE(); // currently unsupported checked int64 load/store.
+ break;
}
-}
+} // NOLINT(readability/fn_size)
// Assembles a branch after an instruction.
@@ -730,27 +1138,15 @@
case kSignedGreaterThan:
__ j(greater, tlabel);
break;
- case kUnorderedLessThan:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kUnsignedLessThan:
__ j(below, tlabel);
break;
- case kUnorderedGreaterThanOrEqual:
- __ j(parity_even, tlabel);
- // Fall through.
case kUnsignedGreaterThanOrEqual:
__ j(above_equal, tlabel);
break;
- case kUnorderedLessThanOrEqual:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kUnsignedLessThanOrEqual:
__ j(below_equal, tlabel);
break;
- case kUnorderedGreaterThan:
- __ j(parity_even, tlabel);
- // Fall through.
case kUnsignedGreaterThan:
__ j(above, tlabel);
break;
@@ -760,13 +1156,16 @@
case kNotOverflow:
__ j(no_overflow, tlabel);
break;
+ default:
+ UNREACHABLE();
+ break;
}
// Add a jump if not falling through to the next block.
if (!branch->fallthru) __ jmp(flabel);
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
@@ -780,7 +1179,7 @@
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label check;
- DCHECK_NE(0, instr->OutputCount());
+ DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = no_condition;
switch (condition) {
@@ -812,35 +1211,15 @@
case kSignedGreaterThan:
cc = greater;
break;
- case kUnorderedLessThan:
- __ j(parity_odd, &check, Label::kNear);
- __ Move(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedLessThan:
cc = below;
break;
- case kUnorderedGreaterThanOrEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ mov(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedGreaterThanOrEqual:
cc = above_equal;
break;
- case kUnorderedLessThanOrEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ Move(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedLessThanOrEqual:
cc = below_equal;
break;
- case kUnorderedGreaterThan:
- __ j(parity_odd, &check, Label::kNear);
- __ mov(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedGreaterThan:
cc = above;
break;
@@ -850,6 +1229,9 @@
case kNotOverflow:
cc = no_overflow;
break;
+ default:
+ UNREACHABLE();
+ break;
}
__ bind(&check);
if (reg.is_byte_register()) {
@@ -869,9 +1251,36 @@
}
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ IA32OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ cmp(input, Immediate(i.InputInt32(index + 0)));
+ __ j(equal, GetLabel(i.InputRpo(index + 1)));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ IA32OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+ Label** cases = zone()->NewArray<Label*>(case_count);
+ for (size_t index = 0; index < case_count; ++index) {
+ cases[index] = GetLabel(i.InputRpo(index + 2));
+ }
+ Label* const table = AddJumpTable(cases, case_count);
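+  // Bounds-check the input; out-of-range values jump to the default target
+  // (input 1), otherwise jump indirectly through the emitted table.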
+ __ cmp(input, Immediate(case_count));
+ __ j(above_equal, GetLabel(i.InputRpo(1)));
+ __ jmp(Operand::JumpTable(input, times_4, table));
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
@@ -1006,77 +1415,91 @@
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- Frame* frame = this->frame();
- int stack_slots = frame->GetSpillSlotCount();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
    // Assemble a prologue similar to the cdecl calling convention.
__ push(ebp);
__ mov(ebp, esp);
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- int register_save_area_size = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- __ push(Register::from_code(i));
- register_save_area_size += kPointerSize;
- }
- frame->SetRegisterSaveAreaSize(register_save_area_size);
- }
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- frame->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
+ // TODO(turbofan): this prologue is redundant with OSR, but still needed for
+ // code aging.
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
- frame->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
}
- if (stack_slots > 0) {
- __ sub(esp, Immediate(stack_slots * kPointerSize));
+ frame_access_state()->SetFrameAccessToDefault();
+
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (stack_shrink_slots > 0) {
+ __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
+ }
+
+ if (saves != 0) { // Save callee-saved registers.
+ DCHECK(!info()->is_osr());
+ int pushed = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ __ push(Register::from_code(i));
+ ++pushed;
+ }
+ frame()->AllocateSavedCalleeRegisterSlots(pushed);
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
- if (stack_slots > 0) {
- __ add(esp, Immediate(stack_slots * kPointerSize));
- }
- // Restore registers.
- if (saves != 0) {
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (!((1 << i) & saves)) continue;
- __ pop(Register::from_code(i));
- }
- }
- __ pop(ebp); // Pop caller's frame pointer.
- __ ret(0);
- } else {
- // No saved registers.
- __ mov(esp, ebp); // Move stack pointer back to frame pointer.
- __ pop(ebp); // Pop caller's frame pointer.
- __ ret(0);
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ // Restore registers.
+ if (saves != 0) {
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ if (!((1 << i) & saves)) continue;
+ __ pop(Register::from_code(i));
}
- } else {
+ }
+
+ if (descriptor->IsCFunctionCall()) {
__ mov(esp, ebp); // Move stack pointer back to frame pointer.
__ pop(ebp); // Pop caller's frame pointer.
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ ret(pop_count * kPointerSize);
+ } else if (frame()->needs_frame()) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer.
+ __ pop(ebp); // Pop caller's frame pointer.
+ }
}
+ size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ // Might need ecx for scratch if pop_size is too big.
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
+ __ Ret(static_cast<int>(pop_size), ecx);
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- IA32OperandConverter g(this, NULL);
+ IA32OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1099,7 +1522,18 @@
Constant src_constant = g.ToConstant(source);
if (src_constant.type() == Constant::kHeapObject) {
Handle<HeapObject> src = src_constant.ToHeapObject();
- if (destination->IsRegister()) {
+ int offset;
+ if (IsMaterializableFromFrame(src, &offset)) {
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mov(dst, g.ToMaterializableOperand(offset));
+ } else {
+ DCHECK(destination->IsStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ push(g.ToMaterializableOperand(offset));
+ __ pop(dst);
+ }
+ } else if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ LoadHeapObject(dst, src);
} else {
@@ -1163,10 +1597,9 @@
XMMRegister dst = g.ToDoubleRegister(destination);
__ movsd(dst, src);
} else {
- // We rely on having xmm0 available as a fixed scratch register.
Operand dst = g.ToOperand(destination);
- __ movsd(xmm0, src);
- __ movsd(dst, xmm0);
+ __ movsd(kScratchDoubleReg, src);
+ __ movsd(dst, kScratchDoubleReg);
}
} else {
UNREACHABLE();
@@ -1176,53 +1609,64 @@
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- IA32OperandConverter g(this, NULL);
+ IA32OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Register-register.
Register src = g.ToRegister(source);
Register dst = g.ToRegister(destination);
- __ xchg(dst, src);
+ __ push(src);
+ __ mov(src, dst);
+ __ pop(dst);
} else if (source->IsRegister() && destination->IsStackSlot()) {
// Register-memory.
- __ xchg(g.ToRegister(source), g.ToOperand(destination));
+ Register src = g.ToRegister(source);
+ __ push(src);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand dst = g.ToOperand(destination);
+ __ mov(src, dst);
+ frame_access_state()->IncreaseSPDelta(-1);
+ dst = g.ToOperand(destination);
+ __ pop(dst);
} else if (source->IsStackSlot() && destination->IsStackSlot()) {
// Memory-memory.
- Operand src = g.ToOperand(source);
- Operand dst = g.ToOperand(destination);
- __ push(dst);
- __ push(src);
- __ pop(dst);
- __ pop(src);
+ Operand dst1 = g.ToOperand(destination);
+ __ push(dst1);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand src1 = g.ToOperand(source);
+ __ push(src1);
+ Operand dst2 = g.ToOperand(destination);
+ __ pop(dst2);
+ frame_access_state()->IncreaseSPDelta(-1);
+ Operand src2 = g.ToOperand(source);
+ __ pop(src2);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
- // XMM register-register swap. We rely on having xmm0
- // available as a fixed scratch register.
+ // XMM register-register swap.
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movaps(xmm0, src);
+ __ movaps(kScratchDoubleReg, src);
__ movaps(src, dst);
- __ movaps(dst, xmm0);
+ __ movaps(dst, kScratchDoubleReg);
} else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
- // XMM register-memory swap. We rely on having xmm0
- // available as a fixed scratch register.
+ // XMM register-memory swap.
XMMRegister reg = g.ToDoubleRegister(source);
Operand other = g.ToOperand(destination);
- __ movsd(xmm0, other);
+ __ movsd(kScratchDoubleReg, other);
__ movsd(other, reg);
- __ movaps(reg, xmm0);
+ __ movaps(reg, kScratchDoubleReg);
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
// Double-width memory-to-memory.
Operand src0 = g.ToOperand(source);
Operand src1 = g.HighOperand(source);
Operand dst0 = g.ToOperand(destination);
Operand dst1 = g.HighOperand(destination);
- __ movsd(xmm0, dst0); // Save destination in xmm0.
- __ push(src0); // Then use stack to copy source to destination.
+ __ movsd(kScratchDoubleReg, dst0); // Save destination in scratch register.
+ __ push(src0); // Then use stack to copy source to destination.
__ pop(dst0);
__ push(src1);
__ pop(dst1);
- __ movsd(src0, xmm0);
+ __ movsd(src0, kScratchDoubleReg);
} else {
// No other combinations are possible.
UNREACHABLE();
@@ -1230,21 +1674,29 @@
}
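
The push/pop choreography is easiest to follow on the memory-to-memory case; an illustrative trace with assumed slot values A and B (the IncreaseSPDelta calls in the register-memory case exist so that operands recomputed while a value is parked on the stack pick up the shifted esp-relative offsets):

    // before:       dst slot = A, src slot = B
    // push dst   => stack holds A
    // push src   => stack holds A, B
    // pop  dst   => dst slot = B, stack holds A
    // pop  src   => src slot = A   (swap complete, no scratch GPR needed)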
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ for (size_t index = 0; index < target_count; ++index) {
+ __ dd(targets[index]);
+ }
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
void CodeGenerator::EnsureSpaceForLazyDeopt() {
- int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
- }
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
}
- MarkLazyDeoptSite();
+
+ int space_needed = Deoptimizer::patch_size();
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
}
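
A worked example with assumed numbers: if last_lazy_deopt_pc_ is 100, patch_size() is 5, and the assembler is currently at offset 102, then 102 < 105 and three bytes of padding are emitted, so a lazy-deopt patch written at offset 100 can never clobber the instruction that follows.

    // 100 + 5 - 102 = 3  =>  __ Nop(3);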
#undef __
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
index ec9fd18..816487d 100644
--- a/src/compiler/ia32/instruction-codes-ia32.h
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -30,26 +30,59 @@
V(IA32Shr) \
V(IA32Sar) \
V(IA32Ror) \
+ V(IA32Lzcnt) \
+ V(IA32Tzcnt) \
+ V(IA32Popcnt) \
+ V(SSEFloat32Cmp) \
+ V(SSEFloat32Add) \
+ V(SSEFloat32Sub) \
+ V(SSEFloat32Mul) \
+ V(SSEFloat32Div) \
+ V(SSEFloat32Max) \
+ V(SSEFloat32Min) \
+ V(SSEFloat32Abs) \
+ V(SSEFloat32Neg) \
+ V(SSEFloat32Sqrt) \
+ V(SSEFloat32Round) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
+ V(SSEFloat64Max) \
+ V(SSEFloat64Min) \
+ V(SSEFloat64Abs) \
+ V(SSEFloat64Neg) \
V(SSEFloat64Sqrt) \
- V(SSEFloat64Floor) \
- V(SSEFloat64Ceil) \
- V(SSEFloat64RoundTruncate) \
- V(SSECvtss2sd) \
- V(SSECvtsd2ss) \
+ V(SSEFloat64Round) \
+ V(SSEFloat32ToFloat64) \
+ V(SSEFloat64ToFloat32) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
V(SSEUint32ToFloat64) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
+ V(AVXFloat32Add) \
+ V(AVXFloat32Sub) \
+ V(AVXFloat32Mul) \
+ V(AVXFloat32Div) \
+ V(AVXFloat32Max) \
+ V(AVXFloat32Min) \
V(AVXFloat64Add) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
V(AVXFloat64Div) \
+ V(AVXFloat64Max) \
+ V(AVXFloat64Min) \
+ V(AVXFloat64Abs) \
+ V(AVXFloat64Neg) \
+ V(AVXFloat32Abs) \
+ V(AVXFloat32Neg) \
V(IA32Movsxbl) \
V(IA32Movzxbl) \
V(IA32Movb) \
@@ -59,9 +92,14 @@
V(IA32Movl) \
V(IA32Movss) \
V(IA32Movsd) \
+ V(IA32BitcastFI) \
+ V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
- V(IA32StoreWriteBarrier)
+ V(IA32PushFloat32) \
+ V(IA32PushFloat64) \
+ V(IA32Poke) \
+ V(IA32StackCheck)
// Addressing modes represent the "shape" of inputs to an instruction.
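
These V(...) lists are classic X-macros: one list expands into the ArchOpcode enum, the opcode names, and switches such as the scheduler's. A hypothetical two-entry list, just to illustrate the mechanics:

    #define TARGET_ARCH_OPCODE_LIST(V) V(IA32Add) V(IA32Movl)

    enum ArchOpcode {
    #define DECLARE_OPCODE(Name) k##Name,
      TARGET_ARCH_OPCODE_LIST(DECLARE_OPCODE)  // expands to: kIA32Add, kIA32Movl,
    #undef DECLARE_OPCODE
    };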
diff --git a/src/compiler/ia32/instruction-scheduler-ia32.cc b/src/compiler/ia32/instruction-scheduler-ia32.cc
new file mode 100644
index 0000000..0a8fcac
--- /dev/null
+++ b/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -0,0 +1,135 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kIA32Add:
+ case kIA32And:
+ case kIA32Cmp:
+ case kIA32Test:
+ case kIA32Or:
+ case kIA32Xor:
+ case kIA32Sub:
+ case kIA32Imul:
+ case kIA32ImulHigh:
+ case kIA32UmulHigh:
+ case kIA32Idiv:
+ case kIA32Udiv:
+ case kIA32Not:
+ case kIA32Neg:
+ case kIA32Shl:
+ case kIA32Shr:
+ case kIA32Sar:
+ case kIA32Ror:
+ case kIA32Lzcnt:
+ case kIA32Tzcnt:
+ case kIA32Popcnt:
+ case kIA32Lea:
+ case kSSEFloat32Cmp:
+ case kSSEFloat32Add:
+ case kSSEFloat32Sub:
+ case kSSEFloat32Mul:
+ case kSSEFloat32Div:
+ case kSSEFloat32Max:
+ case kSSEFloat32Min:
+ case kSSEFloat32Abs:
+ case kSSEFloat32Neg:
+ case kSSEFloat32Sqrt:
+ case kSSEFloat32Round:
+ case kSSEFloat64Cmp:
+ case kSSEFloat64Add:
+ case kSSEFloat64Sub:
+ case kSSEFloat64Mul:
+ case kSSEFloat64Div:
+ case kSSEFloat64Mod:
+ case kSSEFloat64Max:
+ case kSSEFloat64Min:
+ case kSSEFloat64Abs:
+ case kSSEFloat64Neg:
+ case kSSEFloat64Sqrt:
+ case kSSEFloat64Round:
+ case kSSEFloat32ToFloat64:
+ case kSSEFloat64ToFloat32:
+ case kSSEFloat64ToInt32:
+ case kSSEFloat64ToUint32:
+ case kSSEInt32ToFloat64:
+ case kSSEUint32ToFloat64:
+ case kSSEFloat64ExtractLowWord32:
+ case kSSEFloat64ExtractHighWord32:
+ case kSSEFloat64InsertLowWord32:
+ case kSSEFloat64InsertHighWord32:
+ case kSSEFloat64LoadLowWord32:
+ case kAVXFloat32Add:
+ case kAVXFloat32Sub:
+ case kAVXFloat32Mul:
+ case kAVXFloat32Div:
+ case kAVXFloat32Max:
+ case kAVXFloat32Min:
+ case kAVXFloat64Add:
+ case kAVXFloat64Sub:
+ case kAVXFloat64Mul:
+ case kAVXFloat64Div:
+ case kAVXFloat64Max:
+ case kAVXFloat64Min:
+ case kAVXFloat64Abs:
+ case kAVXFloat64Neg:
+ case kAVXFloat32Abs:
+ case kAVXFloat32Neg:
+ case kIA32BitcastFI:
+ case kIA32BitcastIF:
+ return (instr->addressing_mode() == kMode_None)
+ ? kNoOpcodeFlags
+ : kIsLoadOperation | kHasSideEffect;
+
+ case kIA32Movsxbl:
+ case kIA32Movzxbl:
+ case kIA32Movb:
+ case kIA32Movsxwl:
+ case kIA32Movzxwl:
+ case kIA32Movw:
+ case kIA32Movl:
+ case kIA32Movss:
+ case kIA32Movsd:
+ // Moves are used for memory load/store operations.
+ return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
+
+ case kIA32StackCheck:
+ return kIsLoadOperation;
+
+ case kIA32Push:
+ case kIA32PushFloat32:
+ case kIA32PushFloat64:
+ case kIA32Poke:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture-independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
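
The addressing-mode test on the ALU group is what separates a pure register operation from one that also touches memory; roughly:

    // add eax, ebx      (kMode_None)    => kNoOpcodeFlags, freely schedulable
    // add eax, [ebp+8]  (memory input)  => kIsLoadOperation | kHasSideEffect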
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 16063ab..0906452 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -2,25 +2,31 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/adapters.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
namespace compiler {
// Adds IA32-specific methods for generating operands.
-class IA32OperandGenerator FINAL : public OperandGenerator {
+class IA32OperandGenerator final : public OperandGenerator {
public:
explicit IA32OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand* UseByteRegister(Node* node) {
- // TODO(dcarney): relax constraint.
+ InstructionOperand UseByteRegister(Node* node) {
+ // TODO(titzer): encode byte register use constraints.
return UseFixed(node, edx);
}
+ InstructionOperand DefineAsByteRegister(Node* node) {
+ // TODO(titzer): encode byte register def constraints.
+ return DefineAsRegister(node);
+ }
+
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
@@ -30,8 +36,9 @@
case IrOpcode::kHeapConstant: {
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
- Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
- return !isolate()->heap()->InNewSpace(*value.handle());
+ Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
+ Isolate* isolate = value->GetIsolate();
+ return !isolate->heap()->InNewSpace(*value);
}
default:
return false;
@@ -40,21 +47,21 @@
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
Node* displacement_node,
- InstructionOperand* inputs[],
+ InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
- int32_t displacement = (displacement_node == NULL)
+ int32_t displacement = (displacement_node == nullptr)
? 0
: OpParameter<int32_t>(displacement_node);
- if (base != NULL) {
+ if (base != nullptr) {
if (base->opcode() == IrOpcode::kInt32Constant) {
displacement += OpParameter<int32_t>(base);
- base = NULL;
+ base = nullptr;
}
}
- if (base != NULL) {
+ if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
- if (index != NULL) {
+ if (index != nullptr) {
DCHECK(scale >= 0 && scale <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
@@ -77,7 +84,7 @@
}
} else {
DCHECK(scale >= 0 && scale <= 3);
- if (index != NULL) {
+ if (index != nullptr) {
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
inputs[(*input_count)++] = TempImmediate(displacement);
@@ -98,11 +105,11 @@
}
AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
- InstructionOperand* inputs[],
+ InstructionOperand inputs[],
size_t* input_count) {
BaseWithIndexAndDisplacement32Matcher m(node, true);
DCHECK(m.matches());
- if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
+ if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
@@ -118,47 +125,81 @@
};
-static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
+namespace {
+
+void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void VisitRR(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
IA32OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+void VisitRROFloat(InstructionSelector* selector, Node* node,
+ ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.Use(node->InputAt(1));
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
+ } else {
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
+ }
+}
- ArchOpcode opcode;
- // TODO(titzer): signed/unsigned small loads
- switch (rep) {
- case kRepFloat32:
+
+void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
+ ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ IA32OperandGenerator g(selector);
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
+ } else {
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
+ }
+}
+
+
+} // namespace
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kIA32Movss;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kIA32Movsd;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kIA32Movsxbl : kIA32Movzxbl;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kIA32Movsxwl : kIA32Movzxwl;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
- default:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
IA32OperandGenerator g(this);
- InstructionOperand* outputs[1];
+ InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand* inputs[3];
+ InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
@@ -173,94 +214,128 @@
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK_EQ(kRepTagged, rep);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
- Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx),
- g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
- temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kIA32Movss;
- break;
- case kRepFloat64:
- opcode = kIA32Movsd;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kIA32Movb;
- break;
- case kRepWord16:
- opcode = kIA32Movw;
- break;
- case kRepTagged: // Fall through.
- case kRepWord32:
- opcode = kIA32Movl;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- InstructionOperand* val;
- if (g.CanBeImmediate(value)) {
- val = g.UseImmediate(value);
- } else if (rep == kRepWord8 || rep == kRepBit) {
- val = g.UseByteRegister(value);
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- val = g.UseRegister(value);
- }
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kIA32Movss;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kIA32Movsd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kIA32Movb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kIA32Movw;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kIA32Movl;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
- InstructionOperand* inputs[4];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
+ InstructionOperand val;
+ if (g.CanBeImmediate(value)) {
+ val = g.UseImmediate(value);
+ } else if (rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit) {
+ val = g.UseByteRegister(value);
+ } else {
+ val = g.UseRegister(value);
+ }
+
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code =
+ opcode | AddressingModeField::encode(addressing_mode);
+ inputs[input_count++] = val;
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
+ inputs);
+ }
}
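
On the consuming side, the code generator can recover both fields from the same instruction word; a minimal sketch, assuming the usual BitField decode accessors:

    InstructionCode const opcode = instr->opcode();
    AddressingMode const addressing_mode = AddressingModeField::decode(opcode);
    RecordWriteMode const record_write_mode =
        static_cast<RecordWriteMode>(MiscField::decode(opcode));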
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
IA32OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.UseRegister(offset);
- InstructionOperand* length_operand =
+ InstructionOperand offset_operand = g.UseRegister(offset);
+ InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
if (g.CanBeImmediate(buffer)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
@@ -275,47 +350,51 @@
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
IA32OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- InstructionOperand* value_operand =
- g.CanBeImmediate(value)
- ? g.UseImmediate(value)
- : ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
- : g.UseRegister(value));
- InstructionOperand* offset_operand = g.UseRegister(offset);
- InstructionOperand* length_operand =
+ InstructionOperand value_operand =
+ g.CanBeImmediate(value) ? g.UseImmediate(value)
+ : ((rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit)
+ ? g.UseByteRegister(value)
+ : g.UseRegister(value));
+ InstructionOperand offset_operand = g.UseRegister(offset);
+ InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
if (g.CanBeImmediate(buffer)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
offset_operand, length_operand, value_operand, offset_operand,
g.UseImmediate(buffer));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
+ Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
offset_operand, length_operand, value_operand, g.UseRegister(buffer),
offset_operand);
}
@@ -329,9 +408,9 @@
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
// TODO(turbofan): match complex addressing modes.
@@ -343,7 +422,7 @@
// mov eax, [ebp-0x10]
// add eax, [ebp-0x10]
// jo label
- InstructionOperand* const input = g.UseRegister(left);
+ InstructionOperand const input = g.UseRegister(left);
inputs[input_count++] = input;
inputs[input_count++] = input;
} else if (g.CanBeImmediate(right)) {
@@ -365,18 +444,16 @@
outputs[output_count++] = g.DefineSameAsFirst(node);
if (cont->IsSet()) {
- // TODO(turbofan): Use byte register here.
- outputs[output_count++] = g.DefineAsRegister(cont->result());
+ outputs[output_count++] = g.DefineAsByteRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -439,7 +516,7 @@
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand* temps[] = {g.TempRegister(edx)};
+ InstructionOperand temps[] = {g.TempRegister(edx)};
selector->Emit(opcode, g.DefineAsFixed(node, eax),
g.UseFixed(node->InputAt(0), eax),
g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
@@ -456,15 +533,15 @@
void EmitLea(InstructionSelector* selector, Node* result, Node* index,
int scale, Node* base, Node* displacement) {
IA32OperandGenerator g(selector);
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
index, scale, base, displacement, inputs, &input_count);
- DCHECK_NE(0, static_cast<int>(input_count));
+ DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
- InstructionOperand* outputs[1];
+ InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(result);
InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
@@ -479,8 +556,8 @@
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, node, index, m.scale(), base, nullptr);
return;
}
VisitShift(this, node, kIA32Shl);
@@ -502,22 +579,40 @@
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
IA32OperandGenerator g(this);
// Try to match the Add to a lea pattern.
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
- (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
- InstructionOperand* inputs[4];
+ (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
+ InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
- DCHECK_NE(0, static_cast<int>(input_count));
+ DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
- InstructionOperand* outputs[1];
+ InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
@@ -545,8 +640,8 @@
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, node, index, m.scale(), base, nullptr);
return;
}
IA32OperandGenerator g(this);
@@ -596,119 +691,202 @@
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEFloat32ToFloat64);
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEInt32ToFloat64);
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEUint32ToFloat64);
}
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEFloat64ToInt32);
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- IA32OperandGenerator g(this);
- Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEFloat64ToUint32);
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ VisitRO(this, node, kSSEFloat64ToFloat32);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, node, kArchTruncateDoubleToI);
+ case TruncationMode::kRoundToZero:
+ return VisitRO(this, node, kSSEFloat64ToInt32);
+ }
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
IA32OperandGenerator g(this);
- Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ Emit(kIA32BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRROFloat(this, node, kAVXFloat32Add, kSSEFloat32Add);
}
void InstructionSelector::VisitFloat64Add(Node* node) {
+ VisitRROFloat(this, node, kAVXFloat64Add, kSSEFloat64Add);
+}
+
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
IA32OperandGenerator g(this);
- if (IsSupported(AVX)) {
- Emit(kAVXFloat64Add, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
- } else {
- Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ Float32BinopMatcher m(node);
+ if (m.left().IsMinusZero()) {
+ VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
+ kSSEFloat32Neg);
+ return;
}
+ VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
IA32OperandGenerator g(this);
- if (IsSupported(AVX)) {
- Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
- } else {
- Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ Float64BinopMatcher m(node);
+ if (m.left().IsMinusZero()) {
+ if (m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
+ g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
+ VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
+ kSSEFloat64Neg);
+ return;
}
+ VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
+}
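
The nested match folds the ceil-via-floor identity ceil(x) == -floor(-x); the -0.0 operands matter because -0.0 - x is an exact sign flip even when x is a zero. The recognized graph shape is:

    // Float64Sub(-0.0, Float64RoundDown(Float64Sub(-0.0, x)))
    //   == -floor(-x) == ceil(x)
    //   => kSSEFloat64Round with kRoundUp: a single round on x.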
+
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRROFloat(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
}
void InstructionSelector::VisitFloat64Mul(Node* node) {
- IA32OperandGenerator g(this);
- if (IsSupported(AVX)) {
- Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
- } else {
- Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
- }
+ VisitRROFloat(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
+}
+
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRROFloat(this, node, kAVXFloat32Div, kSSEFloat32Div);
}
void InstructionSelector::VisitFloat64Div(Node* node) {
- IA32OperandGenerator g(this);
- if (IsSupported(AVX)) {
- Emit(kAVXFloat64Div, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
- } else {
- Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
- }
+ VisitRROFloat(this, node, kAVXFloat64Div, kSSEFloat64Div);
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
IA32OperandGenerator g(this);
- InstructionOperand* temps[] = {g.TempRegister(eax)};
+ InstructionOperand temps[] = {g.TempRegister(eax)};
Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
temps);
}
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ VisitRROFloat(this, node, kAVXFloat32Max, kSSEFloat32Max);
+}
+
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ VisitRROFloat(this, node, kAVXFloat64Max, kSSEFloat64Max);
+}
+
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ VisitRROFloat(this, node, kAVXFloat32Min, kSSEFloat32Min);
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ VisitRROFloat(this, node, kAVXFloat64Min, kSSEFloat64Min);
+}
+
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
IA32OperandGenerator g(this);
- Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64Floor, node);
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ IA32OperandGenerator g(this);
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64Ceil, node);
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRO(this, node, kSSEFloat32Sqrt);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRO(this, node, kSSEFloat64Sqrt);
+}
+
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
+}
+
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
+}
+
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}
@@ -717,71 +895,80 @@
}
-void InstructionSelector::VisitCall(Node* node) {
- IA32OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = NULL;
-
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Push any stack arguments.
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
- // TODO(titzer): handle pushing double parameters.
- Emit(kIA32Push, NULL,
- g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- InstructionOperand** first_output =
- buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
- Instruction* call_instr =
- Emit(opcode, buffer.outputs.size(), first_output,
- buffer.instruction_args.size(), &buffer.instruction_args.front());
- call_instr->MarkAsCall();
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
}
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ IA32OperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ InstructionOperand temps[] = {g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr, temp_count, temps);
+
+ // Poke any stack arguments.
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
+ int const slot = static_cast<int>(n);
+ InstructionOperand value = g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
+ : g.UseRegister(input.node());
+ Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
+ }
+ }
+ } else {
+ // Push any stack arguments.
+ for (PushParameter input : base::Reversed(*arguments)) {
+ // Skip any alignment holes in pushed nodes.
+ if (input.node() == nullptr) continue;
+ InstructionOperand value =
+ g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
+ : IsSupported(ATOM) ||
+ sequence()->IsFloat(GetVirtualRegister(input.node()))
+ ? g.UseRegister(input.node())
+ : g.Use(input.node());
+ if (input.type() == MachineType::Float32()) {
+ Emit(kIA32PushFloat32, g.NoOutput(), value);
+ } else if (input.type() == MachineType::Float64()) {
+ Emit(kIA32PushFloat64, g.NoOutput(), value);
+ } else {
+ Emit(kIA32Push, g.NoOutput(), value);
+ }
+ }
+ }
+}
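
A sketch of what the float pushes must expand to in the code generator (assumed lowering, since ia32 has no push instruction for XMM registers):

    // kIA32PushFloat64 with a register input (assumed):
    __ sub(esp, Immediate(kDoubleSize));                  // Reserve 8 bytes,
    __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));  // then store into them.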
+
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
+
+
namespace {
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand* left, InstructionOperand* right,
+ InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
IA32OperandGenerator g(selector);
if (cont->IsBranch()) {
- selector->Emit(cont->Encode(opcode), NULL, left, right,
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
- // TODO(titzer): Needs byte register.
- selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
+ selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
left, right);
}
}
@@ -799,11 +986,21 @@
}
-// Shared routine for multiple float compare operations.
+// Shared routine for multiple float32 compare operations (inputs commuted).
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Node* const left = node->InputAt(0);
+ Node* const right = node->InputAt(1);
+ VisitCompare(selector, kSSEFloat32Cmp, right, left, cont, false);
+}
+
+
+// Shared routine for multiple float64 compare operations (inputs commuted).
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitCompare(selector, kSSEFloat64Cmp, node->InputAt(0), node->InputAt(1),
- cont, node->op()->HasProperty(Operator::kCommutative));
+ Node* const left = node->InputAt(0);
+ Node* const right = node->InputAt(1);
+ VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
}
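
Commuting the inputs pays off because of how ucomis* sets the flags; sketching the less-than case:

    // a < b  is emitted as  ucomisd(b, a)  plus the "above" condition:
    //   b > a            =>  ZF = 0, CF = 0    =>  above taken
    //   b <= a           =>  above not taken
    //   unordered (NaN)  =>  ZF = CF = PF = 1  =>  above not taken
    // NaN operands thus compare false without a separate parity check.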
@@ -829,6 +1026,26 @@
void VisitWordCompare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
+ IA32OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
+ LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
+ ExternalReference js_stack_limit =
+ ExternalReference::address_of_stack_limit(selector->isolate());
+ if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
+ // Compare(Load(js_stack_limit), LoadStackPointer)
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ InstructionCode opcode = cont->Encode(kIA32StackCheck);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()));
+ }
+ return;
+ }
+ }
VisitWordCompare(selector, node, kIA32Cmp, cont);
}
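
What the fold buys, roughly: the generic shape UintLessThan(Load(js_stack_limit), LoadStackPointer()) would otherwise cost a load plus a compare; as kIA32StackCheck it becomes a single flag-setting instruction of the form (assumed lowering):

    // cmp esp, [address_of_stack_limit]   ; kIA32StackCheck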
@@ -864,27 +1081,36 @@
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+ return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
- Node* node = value->InputAt(0);
- Node* result = node->FindProjection(0);
- if (result == NULL || selector->IsDefined(result)) {
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -923,6 +1149,34 @@
}
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ IA32OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 4 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
+}
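
Plugging assumed numbers into the heuristic: for 7 cases spread over a value range of 10,

    // table:   space 4 + 10  = 14,  time 3  =>  14 + 3*3 = 23
    // lookup:  space 3 + 2*7 = 17,  time 7  =>  17 + 3*7 = 38
    // case_count > 4 and 23 <= 38           =>  emit the jump table.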
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
@@ -958,7 +1212,7 @@
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kIA32Add, &cont);
}
@@ -968,7 +1222,7 @@
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kIA32Sub, &cont);
}
@@ -977,6 +1231,24 @@
}
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedGreaterThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont(kUnorderedEqual, node);
VisitFloat64Compare(this, node, &cont);
@@ -984,28 +1256,80 @@
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kUnsignedGreaterThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Float64Matcher mleft(left);
+ if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
+ Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
+ return;
+ }
+ Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.Use(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.Use(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- if (CpuFeatures::IsSupported(SSE4_1)) {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
- MachineOperatorBuilder::kFloat64RoundTruncate |
- MachineOperatorBuilder::kWord32ShiftIsSafe;
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kWord32Ctz;
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ flags |= MachineOperatorBuilder::kWord32Popcnt;
}
- return MachineOperatorBuilder::Flag::kNoFlags;
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ flags |= MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
+ }
+ return flags;
}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/ia32/linkage-ia32.cc b/src/compiler/ia32/linkage-ia32.cc
deleted file mode 100644
index 12cc34f..0000000
--- a/src/compiler/ia32/linkage-ia32.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct IA32LinkageHelperTraits {
- static Register ReturnValueReg() { return eax; }
- static Register ReturnValue2Reg() { return edx; }
- static Register JSCallFunctionReg() { return edi; }
- static Register ContextReg() { return esi; }
- static Register RuntimeCallFunctionReg() { return ebx; }
- static Register RuntimeCallArgCountReg() { return eax; }
- static RegList CCalleeSaveRegisters() {
- return esi.bit() | edi.bit() | ebx.bit();
- }
- static Register CRegisterParameter(int i) { return no_reg; }
- static int CRegisterParametersLength() { return 0; }
-};
-
-typedef LinkageHelper<IA32LinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index ea17854..6c31ac8 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -19,6 +19,10 @@
#include "src/compiler/mips64/instruction-codes-mips64.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/x64/instruction-codes-x64.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/compiler/ppc/instruction-codes-ppc.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/compiler/x87/instruction-codes-x87.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
@@ -29,28 +33,49 @@
namespace internal {
namespace compiler {
+// Modes for ArchStoreWithWriteBarrier below.
+enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
+
+
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define ARCH_OPCODE_LIST(V) \
- V(ArchCallCodeObject) \
- V(ArchCallJSFunction) \
- V(ArchJmp) \
- V(ArchNop) \
- V(ArchRet) \
- V(ArchStackPointer) \
- V(ArchTruncateDoubleToI) \
- V(CheckedLoadInt8) \
- V(CheckedLoadUint8) \
- V(CheckedLoadInt16) \
- V(CheckedLoadUint16) \
- V(CheckedLoadWord32) \
- V(CheckedLoadFloat32) \
- V(CheckedLoadFloat64) \
- V(CheckedStoreWord8) \
- V(CheckedStoreWord16) \
- V(CheckedStoreWord32) \
- V(CheckedStoreFloat32) \
- V(CheckedStoreFloat64) \
+#define COMMON_ARCH_OPCODE_LIST(V) \
+ V(ArchCallCodeObject) \
+ V(ArchTailCallCodeObject) \
+ V(ArchCallJSFunction) \
+ V(ArchTailCallJSFunction) \
+ V(ArchPrepareCallCFunction) \
+ V(ArchCallCFunction) \
+ V(ArchPrepareTailCall) \
+ V(ArchLazyBailout) \
+ V(ArchJmp) \
+ V(ArchLookupSwitch) \
+ V(ArchTableSwitch) \
+ V(ArchNop) \
+ V(ArchThrowTerminator) \
+ V(ArchDeoptimize) \
+ V(ArchRet) \
+ V(ArchStackPointer) \
+ V(ArchFramePointer) \
+ V(ArchTruncateDoubleToI) \
+ V(ArchStoreWithWriteBarrier) \
+ V(CheckedLoadInt8) \
+ V(CheckedLoadUint8) \
+ V(CheckedLoadInt16) \
+ V(CheckedLoadUint16) \
+ V(CheckedLoadWord32) \
+ V(CheckedLoadWord64) \
+ V(CheckedLoadFloat32) \
+ V(CheckedLoadFloat64) \
+ V(CheckedStoreWord8) \
+ V(CheckedStoreWord16) \
+ V(CheckedStoreWord32) \
+ V(CheckedStoreWord64) \
+ V(CheckedStoreFloat32) \
+ V(CheckedStoreFloat64)
+
+#define ARCH_OPCODE_LIST(V) \
+ COMMON_ARCH_OPCODE_LIST(V) \
TARGET_ARCH_OPCODE_LIST(V)
enum ArchOpcode {
@@ -100,12 +125,16 @@
kUnsignedGreaterThanOrEqual,
kUnsignedLessThanOrEqual,
kUnsignedGreaterThan,
+ kFloatLessThanOrUnordered,
+ kFloatGreaterThanOrEqual,
+ kFloatLessThanOrEqual,
+ kFloatGreaterThanOrUnordered,
+ kFloatLessThan,
+ kFloatGreaterThanOrEqualOrUnordered,
+ kFloatLessThanOrEqualOrUnordered,
+ kFloatGreaterThan,
kUnorderedEqual,
kUnorderedNotEqual,
- kUnorderedLessThan,
- kUnorderedGreaterThanOrEqual,
- kUnorderedLessThanOrEqual,
- kUnorderedGreaterThan,
kOverflow,
kNotOverflow
};
@@ -114,6 +143,8 @@
return static_cast<FlagsCondition>(condition ^ 1);
}
+FlagsCondition CommuteFlagsCondition(FlagsCondition condition);
+
std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc);
// The InstructionCode is an opaque, target-specific integer that encodes
@@ -126,11 +157,11 @@
// for code generation. We encode the instruction, addressing mode, and flags
// continuation into a single InstructionCode which is stored as part of
// the instruction.
-typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
-typedef BitField<AddressingMode, 7, 5> AddressingModeField;
-typedef BitField<FlagsMode, 12, 2> FlagsModeField;
-typedef BitField<FlagsCondition, 14, 5> FlagsConditionField;
-typedef BitField<int, 14, 18> MiscField;
+typedef BitField<ArchOpcode, 0, 8> ArchOpcodeField;
+typedef BitField<AddressingMode, 8, 5> AddressingModeField;
+typedef BitField<FlagsMode, 13, 2> FlagsModeField;
+typedef BitField<FlagsCondition, 15, 5> FlagsConditionField;
+typedef BitField<int, 20, 12> MiscField;
} // namespace compiler
} // namespace internal
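
The widened layout tiles the 32-bit instruction word exactly (8 + 5 + 2 + 5 + 12 = 32) and doubles the opcode space to 256 entries for the enlarged common list. Encoding and decoding compose in the usual BitField way; a small sketch:

    InstructionCode code = kIA32Movl;
    code |= AddressingModeField::encode(kMode_MRI);
    code |= MiscField::encode(7);  // e.g. a poke slot or a RecordWriteMode
    DCHECK_EQ(kIA32Movl, ArchOpcodeField::decode(code));
    DCHECK_EQ(kMode_MRI, AddressingModeField::decode(code));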
diff --git a/src/compiler/instruction-scheduler.cc b/src/compiler/instruction-scheduler.cc
new file mode 100644
index 0000000..2f329ea
--- /dev/null
+++ b/src/compiler/instruction-scheduler.cc
@@ -0,0 +1,280 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+#include "src/base/adapters.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+InstructionScheduler::ScheduleGraphNode::ScheduleGraphNode(
+ Zone* zone,
+ Instruction* instr)
+ : instr_(instr),
+ successors_(zone),
+ unscheduled_predecessors_count_(0),
+ latency_(GetInstructionLatency(instr)),
+ total_latency_(-1),
+ start_cycle_(-1) {
+}
+
+
+void InstructionScheduler::ScheduleGraphNode::AddSuccessor(
+ ScheduleGraphNode* node) {
+ successors_.push_back(node);
+ node->unscheduled_predecessors_count_++;
+}
+
+
+InstructionScheduler::InstructionScheduler(Zone* zone,
+ InstructionSequence* sequence)
+ : zone_(zone),
+ sequence_(sequence),
+ graph_(zone),
+ last_side_effect_instr_(nullptr),
+ pending_loads_(zone),
+ last_live_in_reg_marker_(nullptr) {
+}
+
+
+void InstructionScheduler::StartBlock(RpoNumber rpo) {
+ DCHECK(graph_.empty());
+ DCHECK(last_side_effect_instr_ == nullptr);
+ DCHECK(pending_loads_.empty());
+ DCHECK(last_live_in_reg_marker_ == nullptr);
+ sequence()->StartBlock(rpo);
+}
+
+
+void InstructionScheduler::EndBlock(RpoNumber rpo) {
+ ScheduleBlock();
+ sequence()->EndBlock(rpo);
+ graph_.clear();
+ last_side_effect_instr_ = nullptr;
+ pending_loads_.clear();
+ last_live_in_reg_marker_ = nullptr;
+}
+
+
+void InstructionScheduler::AddInstruction(Instruction* instr) {
+ ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr);
+
+ if (IsBlockTerminator(instr)) {
+    // Make sure that basic block terminators are not moved by adding them
+    // as a successor of every instruction.
+ for (auto node : graph_) {
+ node->AddSuccessor(new_node);
+ }
+ } else if (IsFixedRegisterParameter(instr)) {
+ if (last_live_in_reg_marker_ != nullptr) {
+ last_live_in_reg_marker_->AddSuccessor(new_node);
+ }
+ last_live_in_reg_marker_ = new_node;
+ } else {
+ if (last_live_in_reg_marker_ != nullptr) {
+ last_live_in_reg_marker_->AddSuccessor(new_node);
+ }
+
+ // Instructions with side effects and memory operations can't be
+ // reordered with respect to each other.
+ if (HasSideEffect(instr)) {
+ if (last_side_effect_instr_ != nullptr) {
+ last_side_effect_instr_->AddSuccessor(new_node);
+ }
+ for (auto load : pending_loads_) {
+ load->AddSuccessor(new_node);
+ }
+ pending_loads_.clear();
+ last_side_effect_instr_ = new_node;
+ } else if (IsLoadOperation(instr)) {
+      // Load operations can't be reordered with side-effecting instructions,
+      // but independent loads can be reordered with respect to each other.
+ if (last_side_effect_instr_ != nullptr) {
+ last_side_effect_instr_->AddSuccessor(new_node);
+ }
+ pending_loads_.push_back(new_node);
+ }
+
+ // Look for operand dependencies.
+ for (auto node : graph_) {
+ if (HasOperandDependency(node->instruction(), instr)) {
+ node->AddSuccessor(new_node);
+ }
+ }
+ }
+
+ graph_.push_back(new_node);
+}
+
+
+bool InstructionScheduler::CompareNodes(ScheduleGraphNode* node1,
+                                        ScheduleGraphNode* node2) const {
+ return node1->total_latency() > node2->total_latency();
+}
+
+
+void InstructionScheduler::ScheduleBlock() {
+ ZoneLinkedList<ScheduleGraphNode*> ready_list(zone());
+
+ // Compute total latencies so that we can schedule the critical path first.
+ ComputeTotalLatencies();
+
+ // Add nodes which don't have dependencies to the ready list.
+ for (auto node : graph_) {
+ if (!node->HasUnscheduledPredecessor()) {
+ ready_list.push_back(node);
+ }
+ }
+
+ // Go through the ready list and schedule the instructions.
+ int cycle = 0;
+ while (!ready_list.empty()) {
+ auto candidate = ready_list.end();
+ for (auto iterator = ready_list.begin(); iterator != ready_list.end();
+ ++iterator) {
+ // Look for the best candidate to schedule.
+      // We only consider instructions that have all their operands ready,
+      // and we try to schedule the critical path first (we look for the
+      // instruction with the highest latency on the path to the end of
+      // the graph).
+ if (cycle >= (*iterator)->start_cycle()) {
+ if ((candidate == ready_list.end()) ||
+ CompareNodes(*iterator, *candidate)) {
+ candidate = iterator;
+ }
+ }
+ }
+
+ if (candidate != ready_list.end()) {
+ sequence()->AddInstruction((*candidate)->instruction());
+
+ for (auto successor : (*candidate)->successors()) {
+ successor->DropUnscheduledPredecessor();
+ successor->set_start_cycle(
+ std::max(successor->start_cycle(),
+ cycle + (*candidate)->latency()));
+
+ if (!successor->HasUnscheduledPredecessor()) {
+ ready_list.push_back(successor);
+ }
+ }
+
+ ready_list.erase(candidate);
+ }
+
+ cycle++;
+ }
+}
+
+
+int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kArchNop:
+ case kArchStackPointer:
+ case kArchFramePointer:
+ case kArchTruncateDoubleToI:
+ return kNoOpcodeFlags;
+
+ case kArchPrepareCallCFunction:
+ case kArchPrepareTailCall:
+ case kArchCallCFunction:
+ case kArchCallCodeObject:
+ case kArchCallJSFunction:
+ case kArchLazyBailout:
+ return kHasSideEffect;
+
+ case kArchTailCallCodeObject:
+ case kArchTailCallJSFunction:
+ return kHasSideEffect | kIsBlockTerminator;
+
+ case kArchDeoptimize:
+ case kArchJmp:
+ case kArchLookupSwitch:
+ case kArchTableSwitch:
+ case kArchRet:
+ case kArchThrowTerminator:
+ return kIsBlockTerminator;
+
+ case kCheckedLoadInt8:
+ case kCheckedLoadUint8:
+ case kCheckedLoadInt16:
+ case kCheckedLoadUint16:
+ case kCheckedLoadWord32:
+ case kCheckedLoadWord64:
+ case kCheckedLoadFloat32:
+ case kCheckedLoadFloat64:
+ return kIsLoadOperation;
+
+ case kCheckedStoreWord8:
+ case kCheckedStoreWord16:
+ case kCheckedStoreWord32:
+ case kCheckedStoreWord64:
+ case kCheckedStoreFloat32:
+ case kCheckedStoreFloat64:
+ case kArchStoreWithWriteBarrier:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ TARGET_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ return GetTargetInstructionFlags(instr);
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+bool InstructionScheduler::HasOperandDependency(
+ const Instruction* instr1, const Instruction* instr2) const {
+ for (size_t i = 0; i < instr1->OutputCount(); ++i) {
+ for (size_t j = 0; j < instr2->InputCount(); ++j) {
+ const InstructionOperand* output = instr1->OutputAt(i);
+ const InstructionOperand* input = instr2->InputAt(j);
+
+ if (output->IsUnallocated() && input->IsUnallocated() &&
+ (UnallocatedOperand::cast(output)->virtual_register() ==
+ UnallocatedOperand::cast(input)->virtual_register())) {
+ return true;
+ }
+
+ if (output->IsConstant() && input->IsUnallocated() &&
+ (ConstantOperand::cast(output)->virtual_register() ==
+ UnallocatedOperand::cast(input)->virtual_register())) {
+ return true;
+ }
+ }
+ }
+
+ // TODO(bafsa): Do we need to look for anti-dependencies/output-dependencies?
+
+ return false;
+}
+
+
+bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
+ return ((GetInstructionFlags(instr) & kIsBlockTerminator) ||
+ (instr->flags_mode() == kFlags_branch));
+}
+
+
+void InstructionScheduler::ComputeTotalLatencies() {
+ for (auto node : base::Reversed(graph_)) {
+ int max_latency = 0;
+
+ for (auto successor : node->successors()) {
+ DCHECK(successor->total_latency() != -1);
+ if (successor->total_latency() > max_latency) {
+ max_latency = successor->total_latency();
+ }
+ }
+
+ node->set_total_latency(max_latency + node->latency());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
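
Taken together, ComputeTotalLatencies and ScheduleBlock implement classic critical-path list scheduling: each node's total latency is its own latency plus the largest total latency of any successor, and among ready instructions the one with the highest total latency is scheduled first. A toy standalone version of the latency pass, with made-up node latencies:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Node {
      int latency;                  // cycles for this instruction to complete
      std::vector<int> successors;  // indices of dependent instructions
      int total_latency = -1;       // longest-path latency to a sink node
    };

    int main() {
      // 0 -> {1, 2}, 1 -> 3, 2 -> 3: node 2 (latency 4) is on the
      // critical path.
      std::vector<Node> graph = {{1, {1, 2}}, {1, {3}}, {4, {3}}, {1, {}}};
      // Walk in reverse emission order, as ComputeTotalLatencies does;
      // here successors always have higher indices.
      for (int i = static_cast<int>(graph.size()) - 1; i >= 0; --i) {
        int max_latency = 0;
        for (int s : graph[i].successors) {
          max_latency = std::max(max_latency, graph[s].total_latency);
        }
        graph[i].total_latency = max_latency + graph[i].latency;
      }
      for (size_t i = 0; i < graph.size(); ++i) {
        std::printf("node %zu: total latency %d\n", i, graph[i].total_latency);
      }
      return 0;
    }
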
diff --git a/src/compiler/instruction-scheduler.h b/src/compiler/instruction-scheduler.h
new file mode 100644
index 0000000..fafbe47
--- /dev/null
+++ b/src/compiler/instruction-scheduler.h
@@ -0,0 +1,162 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SCHEDULER_H_
+#define V8_COMPILER_INSTRUCTION_SCHEDULER_H_
+
+#include "src/compiler/instruction.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A set of flags describing properties of the instructions so that the
+// scheduler is aware of dependencies between instructions.
+enum ArchOpcodeFlags {
+ kNoOpcodeFlags = 0,
+  kIsBlockTerminator = 1,  // The instruction marks the end of a basic block,
+                           // e.g. jump and return instructions.
+ kHasSideEffect = 2, // The instruction has some side effects (memory
+ // store, function call...)
+ kIsLoadOperation = 4, // The instruction is a memory load.
+};
+
+
+class InstructionScheduler final : public ZoneObject {
+ public:
+ InstructionScheduler(Zone* zone, InstructionSequence* sequence);
+
+ void StartBlock(RpoNumber rpo);
+ void EndBlock(RpoNumber rpo);
+
+ void AddInstruction(Instruction* instr);
+
+ static bool SchedulerSupported();
+
+ private:
+ // A scheduling graph node.
+  // Represents an instruction and its dependencies.
+  class ScheduleGraphNode : public ZoneObject {
+ public:
+ ScheduleGraphNode(Zone* zone, Instruction* instr);
+
+    // Mark the instruction represented by 'node' as a dependency of this one.
+ // The current instruction will be registered as an unscheduled predecessor
+ // of 'node' (i.e. it must be scheduled before 'node').
+ void AddSuccessor(ScheduleGraphNode* node);
+
+    // Return true if this node still has unscheduled predecessors.
+ bool HasUnscheduledPredecessor() {
+ return unscheduled_predecessors_count_ != 0;
+ }
+
+ // Record that we have scheduled one of the predecessors of this node.
+ void DropUnscheduledPredecessor() {
+ DCHECK(unscheduled_predecessors_count_ > 0);
+ unscheduled_predecessors_count_--;
+ }
+
+ Instruction* instruction() { return instr_; }
+ ZoneDeque<ScheduleGraphNode*>& successors() { return successors_; }
+ int latency() const { return latency_; }
+
+ int total_latency() const { return total_latency_; }
+ void set_total_latency(int latency) { total_latency_ = latency; }
+
+ int start_cycle() const { return start_cycle_; }
+ void set_start_cycle(int start_cycle) { start_cycle_ = start_cycle; }
+
+ private:
+ Instruction* instr_;
+ ZoneDeque<ScheduleGraphNode*> successors_;
+
+ // Number of unscheduled predecessors for this node.
+ int unscheduled_predecessors_count_;
+
+    // Estimate of the instruction latency (the number of cycles it takes
+    // for the instruction to complete).
+ int latency_;
+
+ // The sum of all the latencies on the path from this node to the end of
+ // the graph (i.e. a node with no successor).
+ int total_latency_;
+
+ // The scheduler keeps a nominal cycle count to keep track of when the
+ // result of an instruction is available. This field is updated by the
+ // scheduler to indicate when the value of all the operands of this
+ // instruction will be available.
+ int start_cycle_;
+ };
+
+ // Compare the two nodes and return true if node1 is a better candidate than
+ // node2 (i.e. node1 should be scheduled before node2).
+  bool CompareNodes(ScheduleGraphNode* node1, ScheduleGraphNode* node2) const;
+
+ // Perform scheduling for the current block.
+ void ScheduleBlock();
+
+ // Return the scheduling properties of the given instruction.
+ int GetInstructionFlags(const Instruction* instr) const;
+ int GetTargetInstructionFlags(const Instruction* instr) const;
+
+ // Return true if instr2 uses any value defined by instr1.
+ bool HasOperandDependency(const Instruction* instr1,
+ const Instruction* instr2) const;
+
+ // Return true if the instruction is a basic block terminator.
+ bool IsBlockTerminator(const Instruction* instr) const;
+
+ // Check whether the given instruction has side effects (e.g. function call,
+ // memory store).
+ bool HasSideEffect(const Instruction* instr) const {
+ return GetInstructionFlags(instr) & kHasSideEffect;
+ }
+
+ // Return true if the instruction is a memory load.
+ bool IsLoadOperation(const Instruction* instr) const {
+ return GetInstructionFlags(instr) & kIsLoadOperation;
+ }
+
+ // Identify nops used as a definition point for live-in registers at
+ // function entry.
+ bool IsFixedRegisterParameter(const Instruction* instr) const {
+ return (instr->arch_opcode() == kArchNop) &&
+ (instr->OutputCount() == 1) &&
+ (instr->OutputAt(0)->IsUnallocated()) &&
+ UnallocatedOperand::cast(instr->OutputAt(0))->HasFixedRegisterPolicy();
+ }
+
+ void ComputeTotalLatencies();
+
+ static int GetInstructionLatency(const Instruction* instr);
+
+ Zone* zone() { return zone_; }
+ InstructionSequence* sequence() { return sequence_; }
+
+ Zone* zone_;
+ InstructionSequence* sequence_;
+ ZoneVector<ScheduleGraphNode*> graph_;
+
+ // Last side effect instruction encountered while building the graph.
+ ScheduleGraphNode* last_side_effect_instr_;
+
+  // Set of load instructions encountered since the last side effect
+  // instruction; these loads will be added as predecessors of the next
+  // instruction with side effects.
+ ZoneVector<ScheduleGraphNode*> pending_loads_;
+
+ // Live-in register markers are nop instructions which are emitted at the
+ // beginning of a basic block so that the register allocator will find a
+ // defining instruction for live-in values. They must not be moved.
+  // All these nops are chained together and added as a predecessor of every
+  // other instruction in the basic block.
+ ScheduleGraphNode* last_live_in_reg_marker_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INSTRUCTION_SCHEDULER_H_
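
These flags drive the dependency edges that AddInstruction creates: a side-effecting instruction is ordered after the previous one and after every load seen since then, while independent loads remain free to move past each other. A compact standalone rendering of just that bookkeeping, with instruction kinds reduced to a three-value enum:

    #include <cstdio>
    #include <vector>

    enum Kind { kPlain, kLoad, kSideEffect };

    int main() {
      // deps[i] lists instructions that must be emitted before instruction i.
      std::vector<Kind> instrs = {kLoad, kLoad, kSideEffect, kLoad,
                                  kSideEffect};
      std::vector<std::vector<int>> deps(instrs.size());
      int last_side_effect = -1;
      std::vector<int> pending_loads;
      for (int i = 0; i < static_cast<int>(instrs.size()); ++i) {
        if (instrs[i] == kSideEffect) {
          // Ordered after the previous side effect and all pending loads.
          if (last_side_effect != -1) deps[i].push_back(last_side_effect);
          for (int load : pending_loads) deps[i].push_back(load);
          pending_loads.clear();
          last_side_effect = i;
        } else if (instrs[i] == kLoad) {
          // Ordered after the last side effect; free relative to other loads.
          if (last_side_effect != -1) deps[i].push_back(last_side_effect);
          pending_loads.push_back(i);
        }
      }
      // Loads 0 and 1 carry no mutual edge, so they may be reordered; the
      // side effect at 2 must wait for both, and load 3 must follow it.
      for (size_t i = 0; i < deps.size(); ++i) {
        std::printf("%zu:", i);
        for (int d : deps[i]) std::printf(" %d", d);
        std::printf("\n");
      }
      return 0;
    }
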
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index bdcd952..5cca888 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -8,12 +8,24 @@
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/schedule.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Helper struct containing data about a table or lookup switch.
+struct SwitchInfo {
+ int32_t min_value; // minimum value of {case_values}
+ int32_t max_value; // maximum value of {case_values}
+ size_t value_range; // |max_value - min_value| + 1
+ size_t case_count; // number of cases
+ int32_t* case_values; // actual case values, unsorted
+ BasicBlock** case_branches; // basic blocks corresponding to case values
+ BasicBlock* default_branch; // default branch target
+};
+
// A helper class for the instruction selector that simplifies construction of
// Operands. This class implements a base for architecture-specific helpers.
class OperandGenerator {
@@ -21,132 +33,176 @@
explicit OperandGenerator(InstructionSelector* selector)
: selector_(selector) {}
- InstructionOperand* DefineAsRegister(Node* node) {
- return Define(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+ InstructionOperand NoOutput() {
+ return InstructionOperand(); // Generates an invalid operand.
}
- InstructionOperand* DefineSameAsFirst(Node* result) {
- return Define(result, new (zone())
- UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT));
+ InstructionOperand DefineAsRegister(Node* node) {
+ return Define(node,
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ GetVReg(node)));
}
- InstructionOperand* DefineAsFixed(Node* node, Register reg) {
- return Define(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg)));
+ InstructionOperand DefineSameAsFirst(Node* node) {
+ return Define(node,
+ UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT,
+ GetVReg(node)));
}
- InstructionOperand* DefineAsFixed(Node* node, DoubleRegister reg) {
- return Define(node, new (zone())
+ InstructionOperand DefineAsFixed(Node* node, Register reg) {
+ return Define(node, UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ reg.code(), GetVReg(node)));
+ }
+
+ InstructionOperand DefineAsFixed(Node* node, DoubleRegister reg) {
+ return Define(node,
UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg)));
+ reg.code(), GetVReg(node)));
}
- InstructionOperand* DefineAsConstant(Node* node) {
+ InstructionOperand DefineAsConstant(Node* node) {
selector()->MarkAsDefined(node);
- int virtual_register = selector_->GetVirtualRegister(node);
+ int virtual_register = GetVReg(node);
sequence()->AddConstant(virtual_register, ToConstant(node));
- return ConstantOperand::Create(virtual_register, zone());
+ return ConstantOperand(virtual_register);
}
- InstructionOperand* DefineAsLocation(Node* node, LinkageLocation location,
- MachineType type) {
- return Define(node, ToUnallocatedOperand(location, type));
+ InstructionOperand DefineAsLocation(Node* node, LinkageLocation location,
+ MachineRepresentation rep) {
+ return Define(node, ToUnallocatedOperand(location, rep, GetVReg(node)));
}
- InstructionOperand* Use(Node* node) {
- return Use(
- node, new (zone()) UnallocatedOperand(
- UnallocatedOperand::NONE, UnallocatedOperand::USED_AT_START));
+ InstructionOperand DefineAsDualLocation(Node* node,
+ LinkageLocation primary_location,
+ LinkageLocation secondary_location) {
+ return Define(node,
+ ToDualLocationUnallocatedOperand(
+ primary_location, secondary_location, GetVReg(node)));
}
- InstructionOperand* UseRegister(Node* node) {
- return Use(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
- UnallocatedOperand::USED_AT_START));
+ InstructionOperand Use(Node* node) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::NONE,
+ UnallocatedOperand::USED_AT_START,
+ GetVReg(node)));
+ }
+
+ InstructionOperand UseAny(Node* node) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::ANY,
+ UnallocatedOperand::USED_AT_START,
+ GetVReg(node)));
+ }
+
+ InstructionOperand UseRegister(Node* node) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START,
+ GetVReg(node)));
+ }
+
+ InstructionOperand UseUniqueSlot(Node* node) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_SLOT,
+ GetVReg(node)));
}
// Use register or operand for the node. If a register is chosen, it won't
// alias any temporary or output registers.
- InstructionOperand* UseUnique(Node* node) {
- return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::NONE));
+ InstructionOperand UseUnique(Node* node) {
+ return Use(node,
+ UnallocatedOperand(UnallocatedOperand::NONE, GetVReg(node)));
}
// Use a unique register for the node that does not alias any temporary or
// output registers.
- InstructionOperand* UseUniqueRegister(Node* node) {
- return Use(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+ InstructionOperand UseUniqueRegister(Node* node) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ GetVReg(node)));
}
- InstructionOperand* UseFixed(Node* node, Register reg) {
- return Use(node, new (zone())
- UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg)));
+ InstructionOperand UseFixed(Node* node, Register reg) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ reg.code(), GetVReg(node)));
}
- InstructionOperand* UseFixed(Node* node, DoubleRegister reg) {
- return Use(node, new (zone())
+ InstructionOperand UseFixed(Node* node, DoubleRegister reg) {
+ return Use(node,
UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg)));
+ reg.code(), GetVReg(node)));
}
- InstructionOperand* UseImmediate(Node* node) {
- int index = sequence()->AddImmediate(ToConstant(node));
- return ImmediateOperand::Create(index, zone());
+ InstructionOperand UseExplicit(LinkageLocation location) {
+ MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
+ if (location.IsRegister()) {
+ return ExplicitOperand(LocationOperand::REGISTER, rep,
+ location.AsRegister());
+ } else {
+ return ExplicitOperand(LocationOperand::STACK_SLOT, rep,
+ location.GetLocation());
+ }
}
- InstructionOperand* UseLocation(Node* node, LinkageLocation location,
- MachineType type) {
- return Use(node, ToUnallocatedOperand(location, type));
+ InstructionOperand UseImmediate(Node* node) {
+ return sequence()->AddImmediate(ToConstant(node));
}
- InstructionOperand* TempRegister() {
- UnallocatedOperand* op =
- new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
- UnallocatedOperand::USED_AT_START);
- op->set_virtual_register(sequence()->NextVirtualRegister());
+ InstructionOperand UseLocation(Node* node, LinkageLocation location,
+ MachineRepresentation rep) {
+ return Use(node, ToUnallocatedOperand(location, rep, GetVReg(node)));
+ }
+
+ // Used to force gap moves from the from_location to the to_location
+ // immediately before an instruction.
+ InstructionOperand UsePointerLocation(LinkageLocation to_location,
+ LinkageLocation from_location) {
+ MachineRepresentation rep = MachineType::PointerRepresentation();
+ UnallocatedOperand casted_from_operand =
+ UnallocatedOperand::cast(TempLocation(from_location, rep));
+ selector_->Emit(kArchNop, casted_from_operand);
+ return ToUnallocatedOperand(to_location, rep,
+ casted_from_operand.virtual_register());
+ }
+
+ InstructionOperand TempRegister() {
+ return UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START,
+ sequence()->NextVirtualRegister());
+ }
+
+ InstructionOperand TempDoubleRegister() {
+ UnallocatedOperand op = UnallocatedOperand(
+ UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START, sequence()->NextVirtualRegister());
+ sequence()->MarkAsRepresentation(MachineRepresentation::kFloat64,
+ op.virtual_register());
return op;
}
- InstructionOperand* TempDoubleRegister() {
- UnallocatedOperand* op =
- new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
- UnallocatedOperand::USED_AT_START);
- op->set_virtual_register(sequence()->NextVirtualRegister());
- sequence()->MarkAsDouble(op->virtual_register());
- return op;
+ InstructionOperand TempRegister(Register reg) {
+ return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, reg.code(),
+ InstructionOperand::kInvalidVirtualRegister);
}
- InstructionOperand* TempRegister(Register reg) {
- return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ InstructionOperand TempImmediate(int32_t imm) {
+ return sequence()->AddImmediate(Constant(imm));
}
- InstructionOperand* TempImmediate(int32_t imm) {
- int index = sequence()->AddImmediate(Constant(imm));
- return ImmediateOperand::Create(index, zone());
+ InstructionOperand TempLocation(LinkageLocation location,
+ MachineRepresentation rep) {
+ return ToUnallocatedOperand(location, rep,
+ sequence()->NextVirtualRegister());
}
- InstructionOperand* TempLocation(LinkageLocation location, MachineType type) {
- UnallocatedOperand* op = ToUnallocatedOperand(location, type);
- op->set_virtual_register(sequence()->NextVirtualRegister());
- return op;
- }
-
- InstructionOperand* Label(BasicBlock* block) {
- int index = sequence()->AddImmediate(Constant(block->GetRpoNumber()));
- return ImmediateOperand::Create(index, zone());
+ InstructionOperand Label(BasicBlock* block) {
+ return sequence()->AddImmediate(
+ Constant(RpoNumber::FromInt(block->rpo_number())));
}
protected:
InstructionSelector* selector() const { return selector_; }
InstructionSequence* sequence() const { return selector()->sequence(); }
- Isolate* isolate() const { return zone()->isolate(); }
Zone* zone() const { return selector()->instruction_zone(); }
private:
+ int GetVReg(Node* node) const { return selector_->GetVirtualRegister(node); }
+
static Constant ToConstant(const Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
@@ -161,7 +217,7 @@
case IrOpcode::kExternalConstant:
return Constant(OpParameter<ExternalReference>(node));
case IrOpcode::kHeapConstant:
- return Constant(OpParameter<Unique<HeapObject> >(node).handle());
+ return Constant(OpParameter<Handle<HeapObject>>(node));
default:
break;
}
@@ -169,38 +225,57 @@
return Constant(static_cast<int32_t>(0));
}
- UnallocatedOperand* Define(Node* node, UnallocatedOperand* operand) {
+ UnallocatedOperand Define(Node* node, UnallocatedOperand operand) {
DCHECK_NOT_NULL(node);
- DCHECK_NOT_NULL(operand);
- operand->set_virtual_register(selector_->GetVirtualRegister(node));
+ DCHECK_EQ(operand.virtual_register(), GetVReg(node));
selector()->MarkAsDefined(node);
return operand;
}
- UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
+ UnallocatedOperand Use(Node* node, UnallocatedOperand operand) {
DCHECK_NOT_NULL(node);
- DCHECK_NOT_NULL(operand);
- operand->set_virtual_register(selector_->GetVirtualRegister(node));
+ DCHECK_EQ(operand.virtual_register(), GetVReg(node));
selector()->MarkAsUsed(node);
return operand;
}
- UnallocatedOperand* ToUnallocatedOperand(LinkageLocation location,
- MachineType type) {
- if (location.location_ == LinkageLocation::ANY_REGISTER) {
- return new (zone())
- UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER);
+ UnallocatedOperand ToDualLocationUnallocatedOperand(
+ LinkageLocation primary_location, LinkageLocation secondary_location,
+ int virtual_register) {
+ // We only support the primary location being a register and the secondary
+ // one a slot.
+ DCHECK(primary_location.IsRegister() &&
+ secondary_location.IsCalleeFrameSlot());
+ int reg_id = primary_location.AsRegister();
+ int slot_id = secondary_location.AsCalleeFrameSlot();
+ return UnallocatedOperand(reg_id, slot_id, virtual_register);
+ }
+
+ UnallocatedOperand ToUnallocatedOperand(LinkageLocation location,
+ MachineRepresentation rep,
+ int virtual_register) {
+ if (location.IsAnyRegister()) {
+ // any machine register.
+ return UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ virtual_register);
}
- if (location.location_ < 0) {
- return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
- location.location_);
+ if (location.IsCallerFrameSlot()) {
+ // a location on the caller frame.
+ return UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
+ location.AsCallerFrameSlot(), virtual_register);
}
- if (RepresentationOf(type) == kRepFloat64) {
- return new (zone()) UnallocatedOperand(
- UnallocatedOperand::FIXED_DOUBLE_REGISTER, location.location_);
+ if (location.IsCalleeFrameSlot()) {
+ // a spill location on this (callee) frame.
+ return UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
+ location.AsCalleeFrameSlot(), virtual_register);
}
- return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- location.location_);
+ // a fixed register.
+ if (IsFloatingPoint(rep)) {
+ return UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+ location.AsRegister(), virtual_register);
+ }
+ return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ location.AsRegister(), virtual_register);
}
InstructionSelector* selector_;
@@ -212,7 +287,7 @@
// The whole instruction is treated as a unit by the register allocator, and
// thus no spills or moves can be introduced between the flags-setting
// instruction and the branch or set it should be combined with.
-class FlagsContinuation FINAL {
+class FlagsContinuation final {
public:
FlagsContinuation() : mode_(kFlags_none) {}
@@ -261,53 +336,7 @@
void Commute() {
DCHECK(!IsNone());
- switch (condition_) {
- case kEqual:
- case kNotEqual:
- case kOverflow:
- case kNotOverflow:
- return;
- case kSignedLessThan:
- condition_ = kSignedGreaterThan;
- return;
- case kSignedGreaterThanOrEqual:
- condition_ = kSignedLessThanOrEqual;
- return;
- case kSignedLessThanOrEqual:
- condition_ = kSignedGreaterThanOrEqual;
- return;
- case kSignedGreaterThan:
- condition_ = kSignedLessThan;
- return;
- case kUnsignedLessThan:
- condition_ = kUnsignedGreaterThan;
- return;
- case kUnsignedGreaterThanOrEqual:
- condition_ = kUnsignedLessThanOrEqual;
- return;
- case kUnsignedLessThanOrEqual:
- condition_ = kUnsignedGreaterThanOrEqual;
- return;
- case kUnsignedGreaterThan:
- condition_ = kUnsignedLessThan;
- return;
- case kUnorderedEqual:
- case kUnorderedNotEqual:
- return;
- case kUnorderedLessThan:
- condition_ = kUnorderedGreaterThan;
- return;
- case kUnorderedGreaterThanOrEqual:
- condition_ = kUnorderedLessThanOrEqual;
- return;
- case kUnorderedLessThanOrEqual:
- condition_ = kUnorderedGreaterThanOrEqual;
- return;
- case kUnorderedGreaterThan:
- condition_ = kUnorderedLessThan;
- return;
- }
- UNREACHABLE();
+ condition_ = CommuteFlagsCondition(condition_);
}
void OverwriteAndNegateIfEqual(FlagsCondition condition) {
@@ -333,33 +362,6 @@
BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch.
};
-
-// An internal helper class for generating the operands to calls.
-// TODO(bmeurer): Get rid of the CallBuffer business and make
-// InstructionSelector::VisitCall platform independent instead.
-struct CallBuffer {
- CallBuffer(Zone* zone, const CallDescriptor* descriptor,
- FrameStateDescriptor* frame_state);
-
- const CallDescriptor* descriptor;
- FrameStateDescriptor* frame_state_descriptor;
- NodeVector output_nodes;
- InstructionOperandVector outputs;
- InstructionOperandVector instruction_args;
- NodeVector pushed_nodes;
-
- size_t input_count() const { return descriptor->InputCount(); }
-
- size_t frame_state_count() const { return descriptor->FrameStateCount(); }
-
- size_t frame_state_value_count() const {
- return (frame_state_descriptor == NULL)
- ? 0
- : (frame_state_descriptor->GetTotalSize() +
- 1); // Include deopt id.
- }
-};
-
} // namespace compiler
} // namespace internal
} // namespace v8
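
SwitchInfo packages what the backends need in order to choose between a table switch and a lookup switch; value_range is the number of table entries a table switch would require. A small standalone computation of those fields from hypothetical case values (the branch pointers are omitted):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      // Hypothetical, unsorted case values as they come out of the graph.
      std::vector<int32_t> case_values = {7, 3, 12, 5};
      int32_t min_value =
          *std::min_element(case_values.begin(), case_values.end());
      int32_t max_value =
          *std::max_element(case_values.begin(), case_values.end());
      // |max_value - min_value| + 1: the number of table entries a table
      // switch would need (widened to 64 bits to avoid overflow).
      size_t value_range =
          static_cast<size_t>(static_cast<int64_t>(max_value) - min_value + 1);
      std::printf("case_count=%zu value_range=%zu\n", case_values.size(),
                  value_range);  // case_count=4 value_range=10
      return 0;
    }
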
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index ffb8f9f..86868e5 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -4,101 +4,143 @@
#include "src/compiler/instruction-selector.h"
-#include "src/compiler/graph.h"
+#include <limits>
+
+#include "src/base/adapters.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/compiler/pipeline.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/state-values-utils.h"
+#include "src/deoptimizer.h"
namespace v8 {
namespace internal {
namespace compiler {
-InstructionSelector::InstructionSelector(Zone* local_zone, Graph* graph,
- Linkage* linkage,
- InstructionSequence* sequence,
- Schedule* schedule,
- SourcePositionTable* source_positions,
- Features features)
- : zone_(local_zone),
+InstructionSelector::InstructionSelector(
+ Zone* zone, size_t node_count, Linkage* linkage,
+ InstructionSequence* sequence, Schedule* schedule,
+ SourcePositionTable* source_positions,
+ SourcePositionMode source_position_mode, Features features)
+ : zone_(zone),
linkage_(linkage),
sequence_(sequence),
source_positions_(source_positions),
+ source_position_mode_(source_position_mode),
features_(features),
schedule_(schedule),
- node_map_(graph->NodeCount(), kNodeUnmapped, zone()),
- current_block_(NULL),
- instructions_(zone()),
- defined_(graph->NodeCount(), false, zone()),
- used_(graph->NodeCount(), false, zone()) {}
+ current_block_(nullptr),
+ instructions_(zone),
+ defined_(node_count, false, zone),
+ used_(node_count, false, zone),
+ virtual_registers_(node_count,
+ InstructionOperand::kInvalidVirtualRegister, zone),
+ scheduler_(nullptr) {
+ instructions_.reserve(node_count);
+}
void InstructionSelector::SelectInstructions() {
// Mark the inputs of all phis in loop headers as used.
BasicBlockVector* blocks = schedule()->rpo_order();
- for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
- BasicBlock* block = *i;
+ for (auto const block : *blocks) {
if (!block->IsLoopHeader()) continue;
- DCHECK_NE(0, static_cast<int>(block->PredecessorCount()));
- DCHECK_NE(1, static_cast<int>(block->PredecessorCount()));
- for (BasicBlock::const_iterator j = block->begin(); j != block->end();
- ++j) {
- Node* phi = *j;
+ DCHECK_LE(2u, block->PredecessorCount());
+ for (Node* const phi : *block) {
if (phi->opcode() != IrOpcode::kPhi) continue;
// Mark all inputs as used.
- for (Node* const k : phi->inputs()) {
- MarkAsUsed(k);
+ for (Node* const input : phi->inputs()) {
+ MarkAsUsed(input);
}
}
}
// Visit each basic block in post order.
- for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
+ for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
VisitBlock(*i);
}
// Schedule the selected instructions.
- for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
- BasicBlock* block = *i;
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ scheduler_ = new (zone()) InstructionScheduler(zone(), sequence());
+ }
+
+ for (auto const block : *blocks) {
InstructionBlock* instruction_block =
- sequence()->InstructionBlockAt(block->GetRpoNumber());
+ sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
size_t end = instruction_block->code_end();
size_t start = instruction_block->code_start();
- sequence()->StartBlock(block->GetRpoNumber());
+ DCHECK_LE(end, start);
+ StartBlock(RpoNumber::FromInt(block->rpo_number()));
while (start-- > end) {
- sequence()->AddInstruction(instructions_[start]);
+ AddInstruction(instructions_[start]);
}
- sequence()->EndBlock(block->GetRpoNumber());
+ EndBlock(RpoNumber::FromInt(block->rpo_number()));
+ }
+}
+
+
+void InstructionSelector::StartBlock(RpoNumber rpo) {
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->StartBlock(rpo);
+ } else {
+ sequence()->StartBlock(rpo);
+ }
+}
+
+
+void InstructionSelector::EndBlock(RpoNumber rpo) {
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->EndBlock(rpo);
+ } else {
+ sequence()->EndBlock(rpo);
+ }
+}
+
+
+void InstructionSelector::AddInstruction(Instruction* instr) {
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->AddInstruction(instr);
+ } else {
+ sequence()->AddInstruction(instr);
}
}
Instruction* InstructionSelector::Emit(InstructionCode opcode,
- InstructionOperand* output,
+ InstructionOperand output,
size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps);
}
Instruction* InstructionSelector::Emit(InstructionCode opcode,
- InstructionOperand* output,
- InstructionOperand* a, size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
+ InstructionOperand output,
+ InstructionOperand a, size_t temp_count,
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
}
Instruction* InstructionSelector::Emit(InstructionCode opcode,
- InstructionOperand* output,
- InstructionOperand* a,
- InstructionOperand* b, size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b};
+ InstructionOperand output,
+ InstructionOperand a,
+ InstructionOperand b, size_t temp_count,
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -106,13 +148,13 @@
Instruction* InstructionSelector::Emit(InstructionCode opcode,
- InstructionOperand* output,
- InstructionOperand* a,
- InstructionOperand* b,
- InstructionOperand* c, size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b, c};
+ InstructionOperand output,
+ InstructionOperand a,
+ InstructionOperand b,
+ InstructionOperand c, size_t temp_count,
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b, c};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -120,11 +162,11 @@
Instruction* InstructionSelector::Emit(
- InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
- InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
- size_t temp_count, InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b, c, d};
+ InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+ InstructionOperand b, InstructionOperand c, InstructionOperand d,
+ size_t temp_count, InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b, c, d};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -132,11 +174,11 @@
Instruction* InstructionSelector::Emit(
- InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
- InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
- InstructionOperand* e, size_t temp_count, InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b, c, d, e};
+ InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+ InstructionOperand b, InstructionOperand c, InstructionOperand d,
+ InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b, c, d, e};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -144,12 +186,12 @@
Instruction* InstructionSelector::Emit(
- InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
- InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
- InstructionOperand* e, InstructionOperand* f, size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b, c, d, e, f};
+ InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+ InstructionOperand b, InstructionOperand c, InstructionOperand d,
+ InstructionOperand e, InstructionOperand f, size_t temp_count,
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b, c, d, e, f};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -157,9 +199,9 @@
Instruction* InstructionSelector::Emit(
- InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
- size_t input_count, InstructionOperand** inputs, size_t temp_count,
- InstructionOperand** temps) {
+ InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
+ size_t input_count, InstructionOperand* inputs, size_t temp_count,
+ InstructionOperand* temps) {
Instruction* instr =
Instruction::New(instruction_zone(), opcode, output_count, outputs,
input_count, inputs, temp_count, temps);
@@ -180,141 +222,281 @@
int InstructionSelector::GetVirtualRegister(const Node* node) {
- if (node_map_[node->id()] == kNodeUnmapped) {
- node_map_[node->id()] = sequence()->NextVirtualRegister();
+ DCHECK_NOT_NULL(node);
+ size_t const id = node->id();
+ DCHECK_LT(id, virtual_registers_.size());
+ int virtual_register = virtual_registers_[id];
+ if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
+ virtual_register = sequence()->NextVirtualRegister();
+ virtual_registers_[id] = virtual_register;
}
- return node_map_[node->id()];
+ return virtual_register;
}
-int InstructionSelector::GetMappedVirtualRegister(const Node* node) const {
- return node_map_[node->id()];
+const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
+ const {
+ std::map<NodeId, int> virtual_registers;
+ for (size_t n = 0; n < virtual_registers_.size(); ++n) {
+ if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) {
+ NodeId const id = static_cast<NodeId>(n);
+ virtual_registers.insert(std::make_pair(id, virtual_registers_[n]));
+ }
+ }
+ return virtual_registers;
}
bool InstructionSelector::IsDefined(Node* node) const {
DCHECK_NOT_NULL(node);
- NodeId id = node->id();
- DCHECK(id >= 0);
- DCHECK(id < static_cast<NodeId>(defined_.size()));
+ size_t const id = node->id();
+ DCHECK_LT(id, defined_.size());
return defined_[id];
}
void InstructionSelector::MarkAsDefined(Node* node) {
DCHECK_NOT_NULL(node);
- NodeId id = node->id();
- DCHECK(id >= 0);
- DCHECK(id < static_cast<NodeId>(defined_.size()));
+ size_t const id = node->id();
+ DCHECK_LT(id, defined_.size());
defined_[id] = true;
}
bool InstructionSelector::IsUsed(Node* node) const {
+ DCHECK_NOT_NULL(node);
if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
- NodeId id = node->id();
- DCHECK(id >= 0);
- DCHECK(id < static_cast<NodeId>(used_.size()));
+ size_t const id = node->id();
+ DCHECK_LT(id, used_.size());
return used_[id];
}
void InstructionSelector::MarkAsUsed(Node* node) {
DCHECK_NOT_NULL(node);
- NodeId id = node->id();
- DCHECK(id >= 0);
- DCHECK(id < static_cast<NodeId>(used_.size()));
+ size_t const id = node->id();
+ DCHECK_LT(id, used_.size());
used_[id] = true;
}
-bool InstructionSelector::IsDouble(const Node* node) const {
- DCHECK_NOT_NULL(node);
- int virtual_register = GetMappedVirtualRegister(node);
- if (virtual_register == kNodeUnmapped) return false;
- return sequence()->IsDouble(virtual_register);
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
+ const InstructionOperand& op) {
+ UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
+ sequence()->MarkAsRepresentation(rep, unalloc.virtual_register());
}
-void InstructionSelector::MarkAsDouble(Node* node) {
- DCHECK_NOT_NULL(node);
- DCHECK(!IsReference(node));
- sequence()->MarkAsDouble(GetVirtualRegister(node));
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
+ Node* node) {
+ sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
}
-bool InstructionSelector::IsReference(const Node* node) const {
- DCHECK_NOT_NULL(node);
- int virtual_register = GetMappedVirtualRegister(node);
- if (virtual_register == kNodeUnmapped) return false;
- return sequence()->IsReference(virtual_register);
-}
+namespace {
+
+enum class FrameStateInputKind { kAny, kStackSlot };
-void InstructionSelector::MarkAsReference(Node* node) {
- DCHECK_NOT_NULL(node);
- DCHECK(!IsDouble(node));
- sequence()->MarkAsReference(GetVirtualRegister(node));
-}
-
-
-void InstructionSelector::MarkAsRepresentation(MachineType rep,
- InstructionOperand* op) {
- UnallocatedOperand* unalloc = UnallocatedOperand::cast(op);
- switch (RepresentationOf(rep)) {
- case kRepFloat32:
- case kRepFloat64:
- sequence()->MarkAsDouble(unalloc->virtual_register());
- break;
- case kRepTagged:
- sequence()->MarkAsReference(unalloc->virtual_register());
+InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
+ FrameStateInputKind kind) {
+ switch (input->opcode()) {
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kFloat32Constant:
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kHeapConstant:
+ return g->UseImmediate(input);
+ case IrOpcode::kObjectState:
+ UNREACHABLE();
break;
default:
+ switch (kind) {
+ case FrameStateInputKind::kStackSlot:
+ return g->UseUniqueSlot(input);
+ case FrameStateInputKind::kAny:
+ return g->UseAny(input);
+ }
+ }
+ UNREACHABLE();
+ return InstructionOperand();
+}
+
+
+class StateObjectDeduplicator {
+ public:
+ explicit StateObjectDeduplicator(Zone* zone) : objects_(zone) {}
+ static const size_t kNotDuplicated = SIZE_MAX;
+
+ size_t GetObjectId(Node* node) {
+ for (size_t i = 0; i < objects_.size(); ++i) {
+ if (objects_[i] == node) {
+ return i;
+ }
+ }
+ return kNotDuplicated;
+ }
+
+ size_t InsertObject(Node* node) {
+ size_t id = objects_.size();
+ objects_.push_back(node);
+ return id;
+ }
+
+ private:
+ ZoneVector<Node*> objects_;
+};
+
+
+// Returns the number of instruction operands added to inputs.
+size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
+ InstructionOperandVector* inputs,
+ OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ Node* input, MachineType type,
+ FrameStateInputKind kind, Zone* zone) {
+ switch (input->opcode()) {
+ case IrOpcode::kObjectState: {
+ size_t id = deduplicator->GetObjectId(input);
+ if (id == StateObjectDeduplicator::kNotDuplicated) {
+ size_t entries = 0;
+ id = deduplicator->InsertObject(input);
+ descriptor->fields().push_back(
+ StateValueDescriptor::Recursive(zone, id));
+ StateValueDescriptor* new_desc = &descriptor->fields().back();
+ for (Edge edge : input->input_edges()) {
+ entries += AddOperandToStateValueDescriptor(
+ new_desc, inputs, g, deduplicator, edge.to(),
+ MachineType::AnyTagged(), kind, zone);
+ }
+ return entries;
+ } else {
+ // Crankshaft counts duplicate objects for the running id, so we have
+ // to push the input again.
+ deduplicator->InsertObject(input);
+ descriptor->fields().push_back(
+ StateValueDescriptor::Duplicate(zone, id));
+ return 0;
+ }
break;
+ }
+ default: {
+ inputs->push_back(OperandForDeopt(g, input, kind));
+ descriptor->fields().push_back(StateValueDescriptor::Plain(zone, type));
+ return 1;
+ }
}
}
-void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
- DCHECK_NOT_NULL(node);
- switch (RepresentationOf(rep)) {
- case kRepFloat32:
- case kRepFloat64:
- MarkAsDouble(node);
- break;
- case kRepTagged:
- MarkAsReference(node);
- break;
- default:
- break;
+// Returns the number of instruction operands added to inputs.
+size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
+ Node* state, OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ InstructionOperandVector* inputs,
+ FrameStateInputKind kind, Zone* zone) {
+ DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
+
+ size_t entries = 0;
+ size_t initial_size = inputs->size();
+ USE(initial_size); // initial_size is only used for debug.
+
+ if (descriptor->outer_state()) {
+ entries += AddInputsToFrameStateDescriptor(
+ descriptor->outer_state(), state->InputAt(kFrameStateOuterStateInput),
+ g, deduplicator, inputs, kind, zone);
}
+
+ Node* parameters = state->InputAt(kFrameStateParametersInput);
+ Node* locals = state->InputAt(kFrameStateLocalsInput);
+ Node* stack = state->InputAt(kFrameStateStackInput);
+ Node* context = state->InputAt(kFrameStateContextInput);
+ Node* function = state->InputAt(kFrameStateFunctionInput);
+
+ DCHECK_EQ(descriptor->parameters_count(),
+ StateValuesAccess(parameters).size());
+ DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
+ DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
+
+ StateValueDescriptor* values_descriptor =
+ descriptor->GetStateValueDescriptor();
+ entries += AddOperandToStateValueDescriptor(
+ values_descriptor, inputs, g, deduplicator, function,
+ MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
+ for (StateValuesAccess::TypedNode input_node :
+ StateValuesAccess(parameters)) {
+ entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
+ }
+ if (descriptor->HasContext()) {
+ entries += AddOperandToStateValueDescriptor(
+ values_descriptor, inputs, g, deduplicator, context,
+ MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
+ }
+ for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
+ entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
+ }
+ for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
+ entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
+ }
+ DCHECK_EQ(initial_size + entries, inputs->size());
+ return entries;
}
+} // namespace
+
+// An internal helper class for generating the operands to calls.
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
-CallBuffer::CallBuffer(Zone* zone, const CallDescriptor* d,
- FrameStateDescriptor* frame_desc)
- : descriptor(d),
- frame_state_descriptor(frame_desc),
- output_nodes(zone),
- outputs(zone),
- instruction_args(zone),
- pushed_nodes(zone) {
- output_nodes.reserve(d->ReturnCount());
- outputs.reserve(d->ReturnCount());
- pushed_nodes.reserve(input_count());
- instruction_args.reserve(input_count() + frame_state_value_count());
-}
+struct CallBuffer {
+ CallBuffer(Zone* zone, const CallDescriptor* descriptor,
+ FrameStateDescriptor* frame_state)
+ : descriptor(descriptor),
+ frame_state_descriptor(frame_state),
+ output_nodes(zone),
+ outputs(zone),
+ instruction_args(zone),
+ pushed_nodes(zone) {
+ output_nodes.reserve(descriptor->ReturnCount());
+ outputs.reserve(descriptor->ReturnCount());
+ pushed_nodes.reserve(input_count());
+ instruction_args.reserve(input_count() + frame_state_value_count());
+ }
+
+ const CallDescriptor* descriptor;
+ FrameStateDescriptor* frame_state_descriptor;
+ NodeVector output_nodes;
+ InstructionOperandVector outputs;
+ InstructionOperandVector instruction_args;
+ ZoneVector<PushParameter> pushed_nodes;
+
+ size_t input_count() const { return descriptor->InputCount(); }
+
+ size_t frame_state_count() const { return descriptor->FrameStateCount(); }
+
+ size_t frame_state_value_count() const {
+ return (frame_state_descriptor == nullptr)
+ ? 0
+ : (frame_state_descriptor->GetTotalSize() +
+ 1); // Include deopt id.
+ }
+};
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
- bool call_code_immediate,
- bool call_address_immediate) {
+ CallBufferFlags flags,
+ int stack_param_delta) {
OperandGenerator g(this);
- DCHECK_EQ(call->op()->ValueOutputCount(),
+ DCHECK_LE(call->op()->ValueOutputCount(),
static_cast<int>(buffer->descriptor->ReturnCount()));
DCHECK_EQ(
call->op()->ValueInputCount(),
@@ -325,19 +507,25 @@
if (buffer->descriptor->ReturnCount() == 1) {
buffer->output_nodes.push_back(call);
} else {
- buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), NULL);
- call->CollectProjections(&buffer->output_nodes);
+ buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
+ for (auto use : call->uses()) {
+ if (use->opcode() != IrOpcode::kProjection) continue;
+ size_t const index = ProjectionIndexOf(use->op());
+ DCHECK_LT(index, buffer->output_nodes.size());
+ DCHECK(!buffer->output_nodes[index]);
+ buffer->output_nodes[index] = use;
+ }
}
// Filter out the outputs that aren't live because no projection uses them.
size_t outputs_needed_by_framestate =
- buffer->frame_state_descriptor == NULL
+ buffer->frame_state_descriptor == nullptr
? 0
: buffer->frame_state_descriptor->state_combine()
.ConsumedOutputCount();
for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
- bool output_is_live =
- buffer->output_nodes[i] != NULL || i < outputs_needed_by_framestate;
+ bool output_is_live = buffer->output_nodes[i] != nullptr ||
+ i < outputs_needed_by_framestate;
if (output_is_live) {
MachineType type =
buffer->descriptor->GetReturnType(static_cast<int>(i));
@@ -345,10 +533,11 @@
buffer->descriptor->GetReturnLocation(static_cast<int>(i));
Node* output = buffer->output_nodes[i];
- InstructionOperand* op =
- output == NULL ? g.TempLocation(location, type)
- : g.DefineAsLocation(output, location, type);
- MarkAsRepresentation(type, op);
+ InstructionOperand op =
+ output == nullptr
+ ? g.TempLocation(location, type.representation())
+ : g.DefineAsLocation(output, location, type.representation());
+ MarkAsRepresentation(type.representation(), op);
buffer->outputs.push_back(op);
}
@@ -357,6 +546,8 @@
// The first argument is always the callee code.
Node* callee = call->InputAt(0);
+ bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
+ bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
buffer->instruction_args.push_back(
@@ -367,35 +558,46 @@
case CallDescriptor::kCallAddress:
buffer->instruction_args.push_back(
(call_address_immediate &&
- (callee->opcode() == IrOpcode::kInt32Constant ||
- callee->opcode() == IrOpcode::kInt64Constant))
+ callee->opcode() == IrOpcode::kExternalConstant)
? g.UseImmediate(callee)
: g.UseRegister(callee));
break;
case CallDescriptor::kCallJSFunction:
buffer->instruction_args.push_back(
g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
- buffer->descriptor->GetInputType(0)));
+ buffer->descriptor->GetInputType(0).representation()));
+ break;
+ case CallDescriptor::kLazyBailout:
+ // The target is ignored, but we still need to pass a value here.
+ buffer->instruction_args.push_back(g.UseImmediate(callee));
break;
}
- DCHECK_EQ(1, static_cast<int>(buffer->instruction_args.size()));
+ DCHECK_EQ(1u, buffer->instruction_args.size());
// If the call needs a frame state, we insert the state information as
// follows (n is the number of value inputs to the frame state):
// arg 1 : deoptimization id.
// arg 2 - arg (n + 1) : value inputs to the frame state.
- if (buffer->frame_state_descriptor != NULL) {
+ size_t frame_state_entries = 0;
+ USE(frame_state_entries); // frame_state_entries is only used for debug.
+ if (buffer->frame_state_descriptor != nullptr) {
InstructionSequence::StateId state_id =
sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
Node* frame_state =
call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
- AddFrameStateInputs(frame_state, &buffer->instruction_args,
- buffer->frame_state_descriptor);
+
+ StateObjectDeduplicator deduplicator(instruction_zone());
+
+ frame_state_entries =
+ 1 + AddInputsToFrameStateDescriptor(
+ buffer->frame_state_descriptor, frame_state, &g, &deduplicator,
+ &buffer->instruction_args, FrameStateInputKind::kStackSlot,
+ instruction_zone());
+
+ DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
}
- DCHECK(1 + buffer->frame_state_value_count() ==
- buffer->instruction_args.size());
size_t input_count = static_cast<size_t>(buffer->input_count());
@@ -404,35 +606,53 @@
// not appear as arguments to the call. Everything else ends up
// as an InstructionOperand argument to the call.
auto iter(call->inputs().begin());
- int pushed_count = 0;
+ size_t pushed_count = 0;
+ bool call_tail = (flags & kCallTail) != 0;
for (size_t index = 0; index < input_count; ++iter, ++index) {
DCHECK(iter != call->inputs().end());
DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
if (index == 0) continue; // The first argument (callee) is already done.
- InstructionOperand* op =
- g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
- buffer->descriptor->GetInputType(index));
- if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
- int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
+
+ LinkageLocation location = buffer->descriptor->GetInputLocation(index);
+ if (call_tail) {
+ location = LinkageLocation::ConvertToTailCallerLocation(
+ location, stack_param_delta);
+ }
+ InstructionOperand op =
+ g.UseLocation(*iter, location,
+ buffer->descriptor->GetInputType(index).representation());
+ if (UnallocatedOperand::cast(op).HasFixedSlotPolicy() && !call_tail) {
+ int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
- buffer->pushed_nodes.resize(stack_index + 1, NULL);
+ buffer->pushed_nodes.resize(stack_index + 1);
}
- DCHECK_EQ(NULL, buffer->pushed_nodes[stack_index]);
- buffer->pushed_nodes[stack_index] = *iter;
+ PushParameter parameter(*iter, buffer->descriptor->GetInputType(index));
+ buffer->pushed_nodes[stack_index] = parameter;
pushed_count++;
} else {
buffer->instruction_args.push_back(op);
}
}
- CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
- DCHECK(static_cast<size_t>(input_count) ==
- (buffer->instruction_args.size() + buffer->pushed_nodes.size() -
- buffer->frame_state_value_count()));
+ DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
+ frame_state_entries);
+ if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && call_tail &&
+ stack_param_delta != 0) {
+ // For tail calls that change the size of their parameter list and keep
+ // their return address on the stack, move the return address to just above
+ // the parameters.
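+    // Both the parameter slots and the saved return address are shifted by
+    // the same stack_param_delta, so the return address stays just above the
+    // relocated parameters.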
+ LinkageLocation saved_return_location =
+ LinkageLocation::ForSavedCallerReturnAddress();
+ InstructionOperand return_address =
+ g.UsePointerLocation(LinkageLocation::ConvertToTailCallerLocation(
+ saved_return_location, stack_param_delta),
+ saved_return_location);
+ buffer->instruction_args.push_back(return_address);
+ }
}
void InstructionSelector::VisitBlock(BasicBlock* block) {
- DCHECK_EQ(NULL, current_block_);
+ DCHECK(!current_block_);
current_block_ = block;
int current_block_end = static_cast<int>(instructions_.size());
@@ -443,9 +663,7 @@
// Visit code in reverse control flow order, because architecture-specific
// matching may cover more than one node at a time.
- for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
- ++i) {
- Node* node = *i;
+ for (auto node : base::Reversed(*block)) {
// Skip nodes that are unused or already defined.
if (!IsUsed(node) || IsDefined(node)) continue;
// Generate code for this node "top down", but schedule the code "bottom
@@ -453,57 +671,104 @@
size_t current_node_end = instructions_.size();
VisitNode(node);
std::reverse(instructions_.begin() + current_node_end, instructions_.end());
+ if (instructions_.size() == current_node_end) continue;
+  // Mark the source position on the first instruction emitted.
+ SourcePosition source_position = source_positions_->GetSourcePosition(node);
+ if (source_position.IsKnown() &&
+ (source_position_mode_ == kAllSourcePositions ||
+ node->opcode() == IrOpcode::kCall)) {
+ sequence()->SetSourcePosition(instructions_[current_node_end],
+ source_position);
+ }
}
// We're done with the block.
InstructionBlock* instruction_block =
- sequence()->InstructionBlockAt(block->GetRpoNumber());
+ sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
instruction_block->set_code_start(static_cast<int>(instructions_.size()));
instruction_block->set_code_end(current_block_end);
- current_block_ = NULL;
-}
-
-
-static inline void CheckNoPhis(const BasicBlock* block) {
-#ifdef DEBUG
- // Branch targets should not have phis.
- for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
- const Node* node = *i;
- CHECK_NE(IrOpcode::kPhi, node->opcode());
- }
-#endif
+ current_block_ = nullptr;
}
void InstructionSelector::VisitControl(BasicBlock* block) {
+#ifdef DEBUG
+ // SSA deconstruction requires targets of branches not to have phis.
+ // Edge split form guarantees this property, but is more strict.
+ if (block->SuccessorCount() > 1) {
+ for (BasicBlock* const successor : block->successors()) {
+ for (Node* const node : *successor) {
+ CHECK(!IrOpcode::IsPhiOpcode(node->opcode()));
+ }
+ }
+ }
+#endif
+
Node* input = block->control_input();
switch (block->control()) {
case BasicBlock::kGoto:
return VisitGoto(block->SuccessorAt(0));
+ case BasicBlock::kCall: {
+ DCHECK_EQ(IrOpcode::kCall, input->opcode());
+ BasicBlock* success = block->SuccessorAt(0);
+ BasicBlock* exception = block->SuccessorAt(1);
+ return VisitCall(input, exception), VisitGoto(success);
+ }
+ case BasicBlock::kTailCall: {
+ DCHECK_EQ(IrOpcode::kTailCall, input->opcode());
+ return VisitTailCall(input);
+ }
case BasicBlock::kBranch: {
DCHECK_EQ(IrOpcode::kBranch, input->opcode());
BasicBlock* tbranch = block->SuccessorAt(0);
BasicBlock* fbranch = block->SuccessorAt(1);
- // SSA deconstruction requires targets of branches not to have phis.
- // Edge split form guarantees this property, but is more strict.
- CheckNoPhis(tbranch);
- CheckNoPhis(fbranch);
if (tbranch == fbranch) return VisitGoto(tbranch);
return VisitBranch(input, tbranch, fbranch);
}
+ case BasicBlock::kSwitch: {
+ DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
+ SwitchInfo sw;
+ // Last successor must be Default.
+ sw.default_branch = block->successors().back();
+ DCHECK_EQ(IrOpcode::kIfDefault, sw.default_branch->front()->opcode());
+ // All other successors must be cases.
+ sw.case_count = block->SuccessorCount() - 1;
+ sw.case_branches = &block->successors().front();
+ // Determine case values and their min/max.
+ sw.case_values = zone()->NewArray<int32_t>(sw.case_count);
+ sw.min_value = std::numeric_limits<int32_t>::max();
+ sw.max_value = std::numeric_limits<int32_t>::min();
+ for (size_t index = 0; index < sw.case_count; ++index) {
+ BasicBlock* branch = sw.case_branches[index];
+ int32_t value = OpParameter<int32_t>(branch->front()->op());
+ sw.case_values[index] = value;
+ if (sw.min_value > value) sw.min_value = value;
+ if (sw.max_value < value) sw.max_value = value;
+ }
+ DCHECK_LE(sw.min_value, sw.max_value);
+      // Note that {value_range} can be 0 if {min_value} is -2^31 and
+      // {max_value} is 2^31-1, so don't assume that it's non-zero below.
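+      // E.g. {min_value} == INT32_MIN and {max_value} == INT32_MAX give
+      // 1u + 0x7fffffffu - 0x80000000u, which wraps around to 0.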
+ sw.value_range = 1u + bit_cast<uint32_t>(sw.max_value) -
+ bit_cast<uint32_t>(sw.min_value);
+ return VisitSwitch(input, sw);
+ }
case BasicBlock::kReturn: {
- // If the result itself is a return, return its input.
- Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
- ? input->InputAt(0)
- : input;
- return VisitReturn(value);
+ DCHECK_EQ(IrOpcode::kReturn, input->opcode());
+ return VisitReturn(input);
+ }
+ case BasicBlock::kDeoptimize: {
+ DeoptimizeKind kind = DeoptimizeKindOf(input->op());
+ Node* value = input->InputAt(0);
+ return VisitDeoptimize(kind, value);
}
case BasicBlock::kThrow:
- return VisitThrow(input);
+ DCHECK_EQ(IrOpcode::kThrow, input->opcode());
+ return VisitThrow(input->InputAt(0));
case BasicBlock::kNone: {
// TODO(titzer): exit block doesn't have control.
- DCHECK(input == NULL);
+ DCHECK_NULL(input);
break;
}
default:
@@ -513,7 +778,7 @@
}
-MachineType InstructionSelector::GetMachineType(Node* node) {
+void InstructionSelector::VisitNode(Node* node) {
DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
switch (node->opcode()) {
case IrOpcode::kStart:
@@ -522,156 +787,33 @@
case IrOpcode::kBranch:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
+ case IrOpcode::kIfSuccess:
+ case IrOpcode::kSwitch:
+ case IrOpcode::kIfValue:
+ case IrOpcode::kIfDefault:
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
case IrOpcode::kTerminate:
- // No code needed for these graph artifacts.
- return kMachNone;
- case IrOpcode::kFinish:
- return kMachAnyTagged;
- case IrOpcode::kParameter:
- return linkage()->GetParameterType(OpParameter<int>(node));
- case IrOpcode::kPhi:
- return OpParameter<MachineType>(node);
- case IrOpcode::kProjection:
- // TODO(jarin) Really project from outputs.
- return kMachAnyTagged;
- case IrOpcode::kInt32Constant:
- return kMachInt32;
- case IrOpcode::kInt64Constant:
- return kMachInt64;
- case IrOpcode::kExternalConstant:
- return kMachPtr;
- case IrOpcode::kFloat64Constant:
- return kMachFloat64;
- case IrOpcode::kHeapConstant:
- case IrOpcode::kNumberConstant:
- return kMachAnyTagged;
- case IrOpcode::kCall:
- return kMachAnyTagged;
- case IrOpcode::kFrameState:
- case IrOpcode::kStateValues:
- return kMachNone;
- case IrOpcode::kLoad:
- return OpParameter<LoadRepresentation>(node);
- case IrOpcode::kStore:
- return kMachNone;
- case IrOpcode::kCheckedLoad:
- return OpParameter<MachineType>(node);
- case IrOpcode::kCheckedStore:
- return kMachNone;
- case IrOpcode::kWord32And:
- case IrOpcode::kWord32Or:
- case IrOpcode::kWord32Xor:
- case IrOpcode::kWord32Shl:
- case IrOpcode::kWord32Shr:
- case IrOpcode::kWord32Sar:
- case IrOpcode::kWord32Ror:
- return kMachInt32;
- case IrOpcode::kWord32Equal:
- return kMachBool;
- case IrOpcode::kWord64And:
- case IrOpcode::kWord64Or:
- case IrOpcode::kWord64Xor:
- case IrOpcode::kWord64Shl:
- case IrOpcode::kWord64Shr:
- case IrOpcode::kWord64Sar:
- case IrOpcode::kWord64Ror:
- return kMachInt64;
- case IrOpcode::kWord64Equal:
- return kMachBool;
- case IrOpcode::kInt32Add:
- case IrOpcode::kInt32AddWithOverflow:
- case IrOpcode::kInt32Sub:
- case IrOpcode::kInt32SubWithOverflow:
- case IrOpcode::kInt32Mul:
- case IrOpcode::kInt32Div:
- case IrOpcode::kInt32Mod:
- return kMachInt32;
- case IrOpcode::kInt32LessThan:
- case IrOpcode::kInt32LessThanOrEqual:
- case IrOpcode::kUint32LessThan:
- case IrOpcode::kUint32LessThanOrEqual:
- return kMachBool;
- case IrOpcode::kInt64Add:
- case IrOpcode::kInt64Sub:
- case IrOpcode::kInt64Mul:
- case IrOpcode::kInt64Div:
- case IrOpcode::kInt64Mod:
- return kMachInt64;
- case IrOpcode::kInt64LessThan:
- case IrOpcode::kInt64LessThanOrEqual:
- return kMachBool;
- case IrOpcode::kChangeFloat32ToFloat64:
- case IrOpcode::kChangeInt32ToFloat64:
- case IrOpcode::kChangeUint32ToFloat64:
- return kMachFloat64;
- case IrOpcode::kChangeFloat64ToInt32:
- return kMachInt32;
- case IrOpcode::kChangeFloat64ToUint32:
- return kMachUint32;
- case IrOpcode::kChangeInt32ToInt64:
- return kMachInt64;
- case IrOpcode::kChangeUint32ToUint64:
- return kMachUint64;
- case IrOpcode::kTruncateFloat64ToFloat32:
- return kMachFloat32;
- case IrOpcode::kTruncateFloat64ToInt32:
- case IrOpcode::kTruncateInt64ToInt32:
- return kMachInt32;
- case IrOpcode::kFloat64Add:
- case IrOpcode::kFloat64Sub:
- case IrOpcode::kFloat64Mul:
- case IrOpcode::kFloat64Div:
- case IrOpcode::kFloat64Mod:
- case IrOpcode::kFloat64Sqrt:
- case IrOpcode::kFloat64Floor:
- case IrOpcode::kFloat64Ceil:
- case IrOpcode::kFloat64RoundTruncate:
- case IrOpcode::kFloat64RoundTiesAway:
- return kMachFloat64;
- case IrOpcode::kFloat64Equal:
- case IrOpcode::kFloat64LessThan:
- case IrOpcode::kFloat64LessThanOrEqual:
- return kMachBool;
- default:
- V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
- node->opcode(), node->op()->mnemonic(), node->id());
- }
- return kMachNone;
-}
-
-
-void InstructionSelector::VisitNode(Node* node) {
- DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
- SourcePosition source_position = source_positions_->GetSourcePosition(node);
- if (!source_position.IsUnknown()) {
- DCHECK(!source_position.IsInvalid());
- if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
- Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
- }
- }
- switch (node->opcode()) {
- case IrOpcode::kStart:
- case IrOpcode::kLoop:
- case IrOpcode::kEnd:
- case IrOpcode::kBranch:
- case IrOpcode::kIfTrue:
- case IrOpcode::kIfFalse:
- case IrOpcode::kEffectPhi:
- case IrOpcode::kMerge:
+ case IrOpcode::kBeginRegion:
// No code needed for these graph artifacts.
return;
- case IrOpcode::kFinish:
- return MarkAsReference(node), VisitFinish(node);
+ case IrOpcode::kIfException:
+ return MarkAsReference(node), VisitIfException(node);
+ case IrOpcode::kFinishRegion:
+ return MarkAsReference(node), VisitFinishRegion(node);
+ case IrOpcode::kGuard:
+ return MarkAsReference(node), VisitGuard(node);
case IrOpcode::kParameter: {
- MachineType type = linkage()->GetParameterType(OpParameter<int>(node));
- MarkAsRepresentation(type, node);
+ MachineType type =
+ linkage()->GetParameterType(ParameterIndexOf(node->op()));
+ MarkAsRepresentation(type.representation(), node);
return VisitParameter(node);
}
+ case IrOpcode::kOsrValue:
+ return MarkAsReference(node), VisitOsrValue(node);
case IrOpcode::kPhi: {
- MachineType type = OpParameter<MachineType>(node);
- MarkAsRepresentation(type, node);
+ MachineRepresentation rep = PhiRepresentationOf(node->op());
+ MarkAsRepresentation(rep, node);
return VisitPhi(node);
}
case IrOpcode::kProjection:
@@ -681,157 +823,252 @@
case IrOpcode::kExternalConstant:
return VisitConstant(node);
case IrOpcode::kFloat32Constant:
- return MarkAsDouble(node), VisitConstant(node);
+ return MarkAsFloat32(node), VisitConstant(node);
case IrOpcode::kFloat64Constant:
- return MarkAsDouble(node), VisitConstant(node);
+ return MarkAsFloat64(node), VisitConstant(node);
case IrOpcode::kHeapConstant:
- case IrOpcode::kNumberConstant:
- // TODO(turbofan): only mark non-smis as references.
return MarkAsReference(node), VisitConstant(node);
+ case IrOpcode::kNumberConstant: {
+ double value = OpParameter<double>(node);
+ if (!IsSmiDouble(value)) MarkAsReference(node);
+ return VisitConstant(node);
+ }
case IrOpcode::kCall:
return VisitCall(node);
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
+ case IrOpcode::kObjectState:
return;
case IrOpcode::kLoad: {
- LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
- MarkAsRepresentation(rep, node);
+ LoadRepresentation type = LoadRepresentationOf(node->op());
+ MarkAsRepresentation(type.representation(), node);
return VisitLoad(node);
}
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kWord32And:
- return VisitWord32And(node);
+ return MarkAsWord32(node), VisitWord32And(node);
case IrOpcode::kWord32Or:
- return VisitWord32Or(node);
+ return MarkAsWord32(node), VisitWord32Or(node);
case IrOpcode::kWord32Xor:
- return VisitWord32Xor(node);
+ return MarkAsWord32(node), VisitWord32Xor(node);
case IrOpcode::kWord32Shl:
- return VisitWord32Shl(node);
+ return MarkAsWord32(node), VisitWord32Shl(node);
case IrOpcode::kWord32Shr:
- return VisitWord32Shr(node);
+ return MarkAsWord32(node), VisitWord32Shr(node);
case IrOpcode::kWord32Sar:
- return VisitWord32Sar(node);
+ return MarkAsWord32(node), VisitWord32Sar(node);
case IrOpcode::kWord32Ror:
- return VisitWord32Ror(node);
+ return MarkAsWord32(node), VisitWord32Ror(node);
case IrOpcode::kWord32Equal:
return VisitWord32Equal(node);
+ case IrOpcode::kWord32Clz:
+ return MarkAsWord32(node), VisitWord32Clz(node);
+ case IrOpcode::kWord32Ctz:
+ return MarkAsWord32(node), VisitWord32Ctz(node);
+ case IrOpcode::kWord32Popcnt:
+ return MarkAsWord32(node), VisitWord32Popcnt(node);
+ case IrOpcode::kWord64Popcnt:
+ return MarkAsWord32(node), VisitWord64Popcnt(node);
case IrOpcode::kWord64And:
- return VisitWord64And(node);
+ return MarkAsWord64(node), VisitWord64And(node);
case IrOpcode::kWord64Or:
- return VisitWord64Or(node);
+ return MarkAsWord64(node), VisitWord64Or(node);
case IrOpcode::kWord64Xor:
- return VisitWord64Xor(node);
+ return MarkAsWord64(node), VisitWord64Xor(node);
case IrOpcode::kWord64Shl:
- return VisitWord64Shl(node);
+ return MarkAsWord64(node), VisitWord64Shl(node);
case IrOpcode::kWord64Shr:
- return VisitWord64Shr(node);
+ return MarkAsWord64(node), VisitWord64Shr(node);
case IrOpcode::kWord64Sar:
- return VisitWord64Sar(node);
+ return MarkAsWord64(node), VisitWord64Sar(node);
case IrOpcode::kWord64Ror:
- return VisitWord64Ror(node);
+ return MarkAsWord64(node), VisitWord64Ror(node);
+ case IrOpcode::kWord64Clz:
+ return MarkAsWord64(node), VisitWord64Clz(node);
+ case IrOpcode::kWord64Ctz:
+ return MarkAsWord64(node), VisitWord64Ctz(node);
case IrOpcode::kWord64Equal:
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
- return VisitInt32Add(node);
+ return MarkAsWord32(node), VisitInt32Add(node);
case IrOpcode::kInt32AddWithOverflow:
- return VisitInt32AddWithOverflow(node);
+ return MarkAsWord32(node), VisitInt32AddWithOverflow(node);
case IrOpcode::kInt32Sub:
- return VisitInt32Sub(node);
+ return MarkAsWord32(node), VisitInt32Sub(node);
case IrOpcode::kInt32SubWithOverflow:
-      return VisitInt32SubWithOverflow(node);
+      return MarkAsWord32(node), VisitInt32SubWithOverflow(node);
case IrOpcode::kInt32Mul:
- return VisitInt32Mul(node);
+ return MarkAsWord32(node), VisitInt32Mul(node);
case IrOpcode::kInt32MulHigh:
return VisitInt32MulHigh(node);
case IrOpcode::kInt32Div:
- return VisitInt32Div(node);
+ return MarkAsWord32(node), VisitInt32Div(node);
case IrOpcode::kInt32Mod:
- return VisitInt32Mod(node);
+ return MarkAsWord32(node), VisitInt32Mod(node);
case IrOpcode::kInt32LessThan:
return VisitInt32LessThan(node);
case IrOpcode::kInt32LessThanOrEqual:
return VisitInt32LessThanOrEqual(node);
case IrOpcode::kUint32Div:
- return VisitUint32Div(node);
+ return MarkAsWord32(node), VisitUint32Div(node);
case IrOpcode::kUint32LessThan:
return VisitUint32LessThan(node);
case IrOpcode::kUint32LessThanOrEqual:
return VisitUint32LessThanOrEqual(node);
case IrOpcode::kUint32Mod:
- return VisitUint32Mod(node);
+ return MarkAsWord32(node), VisitUint32Mod(node);
case IrOpcode::kUint32MulHigh:
return VisitUint32MulHigh(node);
case IrOpcode::kInt64Add:
- return VisitInt64Add(node);
+ return MarkAsWord64(node), VisitInt64Add(node);
+ case IrOpcode::kInt64AddWithOverflow:
+ return MarkAsWord64(node), VisitInt64AddWithOverflow(node);
case IrOpcode::kInt64Sub:
- return VisitInt64Sub(node);
+ return MarkAsWord64(node), VisitInt64Sub(node);
+ case IrOpcode::kInt64SubWithOverflow:
+ return MarkAsWord64(node), VisitInt64SubWithOverflow(node);
case IrOpcode::kInt64Mul:
- return VisitInt64Mul(node);
+ return MarkAsWord64(node), VisitInt64Mul(node);
case IrOpcode::kInt64Div:
- return VisitInt64Div(node);
+ return MarkAsWord64(node), VisitInt64Div(node);
case IrOpcode::kInt64Mod:
- return VisitInt64Mod(node);
+ return MarkAsWord64(node), VisitInt64Mod(node);
case IrOpcode::kInt64LessThan:
return VisitInt64LessThan(node);
case IrOpcode::kInt64LessThanOrEqual:
return VisitInt64LessThanOrEqual(node);
case IrOpcode::kUint64Div:
- return VisitUint64Div(node);
+ return MarkAsWord64(node), VisitUint64Div(node);
case IrOpcode::kUint64LessThan:
return VisitUint64LessThan(node);
+ case IrOpcode::kUint64LessThanOrEqual:
+ return VisitUint64LessThanOrEqual(node);
case IrOpcode::kUint64Mod:
- return VisitUint64Mod(node);
+ return MarkAsWord64(node), VisitUint64Mod(node);
case IrOpcode::kChangeFloat32ToFloat64:
- return MarkAsDouble(node), VisitChangeFloat32ToFloat64(node);
+ return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
case IrOpcode::kChangeInt32ToFloat64:
- return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
+ return MarkAsFloat64(node), VisitChangeInt32ToFloat64(node);
case IrOpcode::kChangeUint32ToFloat64:
- return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
+ return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node);
case IrOpcode::kChangeFloat64ToInt32:
- return VisitChangeFloat64ToInt32(node);
+ return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
- return VisitChangeFloat64ToUint32(node);
+ return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ return MarkAsWord64(node), VisitTryTruncateFloat64ToInt64(node);
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
+ case IrOpcode::kTryTruncateFloat64ToUint64:
+ return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
case IrOpcode::kChangeInt32ToInt64:
- return VisitChangeInt32ToInt64(node);
+ return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
case IrOpcode::kChangeUint32ToUint64:
- return VisitChangeUint32ToUint64(node);
+ return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
case IrOpcode::kTruncateFloat64ToFloat32:
- return MarkAsDouble(node), VisitTruncateFloat64ToFloat32(node);
+ return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
case IrOpcode::kTruncateFloat64ToInt32:
- return VisitTruncateFloat64ToInt32(node);
+ return MarkAsWord32(node), VisitTruncateFloat64ToInt32(node);
case IrOpcode::kTruncateInt64ToInt32:
- return VisitTruncateInt64ToInt32(node);
+ return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
+ case IrOpcode::kRoundInt64ToFloat32:
+ return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
+ case IrOpcode::kRoundInt64ToFloat64:
+ return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
+ case IrOpcode::kBitcastFloat32ToInt32:
+ return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
+ case IrOpcode::kRoundUint64ToFloat32:
+      return MarkAsFloat32(node), VisitRoundUint64ToFloat32(node);
+ case IrOpcode::kRoundUint64ToFloat64:
+ return MarkAsFloat64(node), VisitRoundUint64ToFloat64(node);
+ case IrOpcode::kBitcastFloat64ToInt64:
+ return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
+ case IrOpcode::kBitcastInt32ToFloat32:
+ return MarkAsFloat32(node), VisitBitcastInt32ToFloat32(node);
+ case IrOpcode::kBitcastInt64ToFloat64:
+ return MarkAsFloat64(node), VisitBitcastInt64ToFloat64(node);
+ case IrOpcode::kFloat32Add:
+ return MarkAsFloat32(node), VisitFloat32Add(node);
+ case IrOpcode::kFloat32Sub:
+ return MarkAsFloat32(node), VisitFloat32Sub(node);
+ case IrOpcode::kFloat32Mul:
+ return MarkAsFloat32(node), VisitFloat32Mul(node);
+ case IrOpcode::kFloat32Div:
+ return MarkAsFloat32(node), VisitFloat32Div(node);
+ case IrOpcode::kFloat32Min:
+ return MarkAsFloat32(node), VisitFloat32Min(node);
+ case IrOpcode::kFloat32Max:
+ return MarkAsFloat32(node), VisitFloat32Max(node);
+ case IrOpcode::kFloat32Abs:
+ return MarkAsFloat32(node), VisitFloat32Abs(node);
+ case IrOpcode::kFloat32Sqrt:
+ return MarkAsFloat32(node), VisitFloat32Sqrt(node);
+ case IrOpcode::kFloat32Equal:
+ return VisitFloat32Equal(node);
+ case IrOpcode::kFloat32LessThan:
+ return VisitFloat32LessThan(node);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ return VisitFloat32LessThanOrEqual(node);
case IrOpcode::kFloat64Add:
- return MarkAsDouble(node), VisitFloat64Add(node);
+ return MarkAsFloat64(node), VisitFloat64Add(node);
case IrOpcode::kFloat64Sub:
- return MarkAsDouble(node), VisitFloat64Sub(node);
+ return MarkAsFloat64(node), VisitFloat64Sub(node);
case IrOpcode::kFloat64Mul:
- return MarkAsDouble(node), VisitFloat64Mul(node);
+ return MarkAsFloat64(node), VisitFloat64Mul(node);
case IrOpcode::kFloat64Div:
- return MarkAsDouble(node), VisitFloat64Div(node);
+ return MarkAsFloat64(node), VisitFloat64Div(node);
case IrOpcode::kFloat64Mod:
- return MarkAsDouble(node), VisitFloat64Mod(node);
+ return MarkAsFloat64(node), VisitFloat64Mod(node);
+ case IrOpcode::kFloat64Min:
+ return MarkAsFloat64(node), VisitFloat64Min(node);
+ case IrOpcode::kFloat64Max:
+ return MarkAsFloat64(node), VisitFloat64Max(node);
+ case IrOpcode::kFloat64Abs:
+ return MarkAsFloat64(node), VisitFloat64Abs(node);
case IrOpcode::kFloat64Sqrt:
- return MarkAsDouble(node), VisitFloat64Sqrt(node);
+ return MarkAsFloat64(node), VisitFloat64Sqrt(node);
case IrOpcode::kFloat64Equal:
return VisitFloat64Equal(node);
case IrOpcode::kFloat64LessThan:
return VisitFloat64LessThan(node);
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64LessThanOrEqual(node);
- case IrOpcode::kFloat64Floor:
- return MarkAsDouble(node), VisitFloat64Floor(node);
- case IrOpcode::kFloat64Ceil:
- return MarkAsDouble(node), VisitFloat64Ceil(node);
+ case IrOpcode::kFloat32RoundDown:
+ return MarkAsFloat32(node), VisitFloat32RoundDown(node);
+ case IrOpcode::kFloat64RoundDown:
+ return MarkAsFloat64(node), VisitFloat64RoundDown(node);
+ case IrOpcode::kFloat32RoundUp:
+ return MarkAsFloat32(node), VisitFloat32RoundUp(node);
+ case IrOpcode::kFloat64RoundUp:
+ return MarkAsFloat64(node), VisitFloat64RoundUp(node);
+ case IrOpcode::kFloat32RoundTruncate:
+ return MarkAsFloat32(node), VisitFloat32RoundTruncate(node);
case IrOpcode::kFloat64RoundTruncate:
- return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
+ return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
case IrOpcode::kFloat64RoundTiesAway:
- return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
+ return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
+ case IrOpcode::kFloat32RoundTiesEven:
+ return MarkAsFloat32(node), VisitFloat32RoundTiesEven(node);
+ case IrOpcode::kFloat64RoundTiesEven:
+ return MarkAsFloat64(node), VisitFloat64RoundTiesEven(node);
+ case IrOpcode::kFloat64ExtractLowWord32:
+ return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
+ case IrOpcode::kFloat64ExtractHighWord32:
+ return MarkAsWord32(node), VisitFloat64ExtractHighWord32(node);
+ case IrOpcode::kFloat64InsertLowWord32:
+ return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
+ case IrOpcode::kFloat64InsertHighWord32:
+ return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
case IrOpcode::kLoadStackPointer:
return VisitLoadStackPointer(node);
+ case IrOpcode::kLoadFramePointer:
+ return VisitLoadFramePointer(node);
case IrOpcode::kCheckedLoad: {
- MachineType rep = OpParameter<MachineType>(node);
+ MachineRepresentation rep =
+ CheckedLoadRepresentationOf(node->op()).representation();
MarkAsRepresentation(rep, node);
return VisitCheckedLoad(node);
}
@@ -845,24 +1082,56 @@
}
-#if V8_TURBOFAN_BACKEND
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- OperandGenerator g(this);
- Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-
void InstructionSelector::VisitLoadStackPointer(Node* node) {
OperandGenerator g(this);
Emit(kArchStackPointer, g.DefineAsRegister(node));
}
-#endif // V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitLoadFramePointer(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchFramePointer, g.DefineAsRegister(node));
+}
+
+
+void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
+ InstructionOperand& index_operand) {
+ OperandGenerator g(this);
+ size_t input_count = 2 + sw.value_range;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = index_operand;
+ InstructionOperand default_operand = g.Label(sw.default_branch);
+ std::fill(&inputs[1], &inputs[input_count], default_operand);
+ for (size_t index = 0; index < sw.case_count; ++index) {
+ size_t value = sw.case_values[index] - sw.min_value;
+ BasicBlock* branch = sw.case_branches[index];
+ DCHECK_LE(0u, value);
+ DCHECK_LT(value + 2, input_count);
+ inputs[value + 2] = g.Label(branch);
+ }
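+  // For example, case values {1, 3} with min_value 1 give value_range 3 and
+  // inputs == [index, default, L1, default, L3].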
+ Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
+}
+
+
+void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
+ InstructionOperand& value_operand) {
+ OperandGenerator g(this);
+ size_t input_count = 2 + sw.case_count * 2;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = value_operand;
+ inputs[1] = g.Label(sw.default_branch);
+ for (size_t index = 0; index < sw.case_count; ++index) {
+ int32_t value = sw.case_values[index];
+ BasicBlock* branch = sw.case_branches[index];
+ inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+ inputs[index * 2 + 2 + 1] = g.Label(branch);
+ }
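+  // For example, case values {1, 3} give
+  // inputs == [value, default, #1, L1, #3, L3].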
+ Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
+}
+
// 32 bit targets do not implement the following instructions.
-#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
+#if V8_TARGET_ARCH_32_BIT
void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
@@ -885,15 +1154,34 @@
void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
+
+
void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
@@ -917,6 +1205,11 @@
void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
@@ -930,14 +1223,71 @@
}
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
-#endif // V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ UNIMPLEMENTED();
+}
-void InstructionSelector::VisitFinish(Node* node) {
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+#endif // V8_TARGET_ARCH_32_BIT
+
+
+void InstructionSelector::VisitFinishRegion(Node* node) {
+ OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+}
+
+
+void InstructionSelector::VisitGuard(Node* node) {
OperandGenerator g(this);
Node* value = node->InputAt(0);
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
@@ -946,10 +1296,36 @@
void InstructionSelector::VisitParameter(Node* node) {
OperandGenerator g(this);
- int index = OpParameter<int>(node);
+ int index = ParameterIndexOf(node->op());
+ InstructionOperand op =
+ linkage()->ParameterHasSecondaryLocation(index)
+ ? g.DefineAsDualLocation(
+ node, linkage()->GetParameterLocation(index),
+ linkage()->GetParameterSecondaryLocation(index))
+ : g.DefineAsLocation(
+ node, linkage()->GetParameterLocation(index),
+ linkage()->GetParameterType(index).representation());
+
+ Emit(kArchNop, op);
+}
+
+
+void InstructionSelector::VisitIfException(Node* node) {
+ OperandGenerator g(this);
+ Node* call = node->InputAt(1);
+ DCHECK_EQ(IrOpcode::kCall, call->opcode());
+ const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(call);
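+  // Define the exception value at the return location of the throwing call;
+  // that is where it is expected to be when control reaches the handler.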
Emit(kArchNop,
- g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
- linkage()->GetParameterType(index)));
+ g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
+ descriptor->GetReturnType(0).representation()));
+}
+
+
+void InstructionSelector::VisitOsrValue(Node* node) {
+ OperandGenerator g(this);
+ int index = OpParameter<int>(node);
+ Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index),
+ MachineRepresentation::kTagged));
}
@@ -958,11 +1334,13 @@
PhiInstruction* phi = new (instruction_zone())
PhiInstruction(instruction_zone(), GetVirtualRegister(node),
static_cast<size_t>(input_count));
- sequence()->InstructionBlockAt(current_block_->GetRpoNumber())->AddPhi(phi);
+ sequence()
+ ->InstructionBlockAt(RpoNumber::FromInt(current_block_->rpo_number()))
+ ->AddPhi(phi);
for (int i = 0; i < input_count; ++i) {
Node* const input = node->InputAt(i);
MarkAsUsed(input);
- phi->Extend(instruction_zone(), GetVirtualRegister(input));
+ phi->SetInput(static_cast<size_t>(i), GetVirtualRegister(input));
}
}
@@ -973,10 +1351,16 @@
switch (value->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt32SubWithOverflow:
- if (OpParameter<size_t>(node) == 0) {
+ case IrOpcode::kInt64AddWithOverflow:
+ case IrOpcode::kInt64SubWithOverflow:
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ case IrOpcode::kTryTruncateFloat64ToUint64:
+ if (ProjectionIndexOf(node->op()) == 0u) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
} else {
- DCHECK(OpParameter<size_t>(node) == 1u);
+ DCHECK(ProjectionIndexOf(node->op()) == 1u);
MarkAsUsed(value);
}
break;
@@ -994,153 +1378,246 @@
}
-void InstructionSelector::VisitGoto(BasicBlock* target) {
- // jump to the next block.
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
OperandGenerator g(this);
- Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
+ const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+
+ FrameStateDescriptor* frame_state_descriptor = nullptr;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor = GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on some architectures it's probably better to use
+ // the code object in a register if there are multiple uses of it.
+ // Improve constant pool and the heuristics in the register allocator
+ // for where to emit constants.
+ CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
+ InitializeCallBuffer(node, &buffer, call_buffer_flags);
+
+ EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
+
+ // Pass label of exception handler block.
+ CallDescriptor::Flags flags = descriptor->flags();
+ if (handler) {
+ DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
+ if (hint == IfExceptionHint::kLocallyCaught) {
+ flags |= CallDescriptor::kHasLocalCatchHandler;
+ }
+ flags |= CallDescriptor::kHasExceptionHandler;
+ buffer.instruction_args.push_back(g.Label(handler));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode = kArchNop;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
+ break;
+ case CallDescriptor::kLazyBailout:
+ opcode = kArchLazyBailout | MiscField::encode(flags);
+ break;
+ }
+
+ // Emit the call instruction.
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())
+ ->MarkAsCall();
}
-void InstructionSelector::VisitReturn(Node* value) {
+void InstructionSelector::VisitTailCall(Node* node) {
OperandGenerator g(this);
- if (value != NULL) {
- Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation(),
- linkage()->GetReturnType()));
+ CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
+ DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
+
+ // TODO(turbofan): Relax restriction for stack parameters.
+
+ int stack_param_delta = 0;
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node,
+ &stack_param_delta)) {
+ CallBuffer buffer(zone(), descriptor, nullptr);
+
+ // Compute InstructionOperands for inputs and outputs.
+ CallBufferFlags flags(kCallCodeImmediate | kCallTail);
+ if (IsTailCallAddressImmediate()) {
+ flags |= kCallAddressImmediate;
+ }
+ InitializeCallBuffer(node, &buffer, flags, stack_param_delta);
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ buffer.instruction_args.push_back(g.TempImmediate(stack_param_delta));
+
+ Emit(kArchPrepareTailCall, g.NoOutput(),
+ g.TempImmediate(stack_param_delta));
+
+ // Emit the tailcall instruction.
+ Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
} else {
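+    // A tail call is not possible here, so emit a regular call followed by
+    // an explicit return of its results.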
- Emit(kArchRet, NULL);
+ FrameStateDescriptor* frame_state_descriptor =
+ descriptor->NeedsFrameState()
+ ? GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())))
+ : nullptr;
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ CallBufferFlags flags = kCallCodeImmediate;
+ if (IsTailCallAddressImmediate()) {
+ flags |= kCallAddressImmediate;
+ }
+ InitializeCallBuffer(node, &buffer, flags);
+
+ EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ size_t output_count = buffer.outputs.size();
+    auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())
+ ->MarkAsCall();
+ Emit(kArchRet, 0, nullptr, output_count, outputs);
}
}
+void InstructionSelector::VisitGoto(BasicBlock* target) {
+  // Jump to the next block.
+ OperandGenerator g(this);
+ Emit(kArchJmp, g.NoOutput(), g.Label(target));
+}
+
+
+void InstructionSelector::VisitReturn(Node* ret) {
+ OperandGenerator g(this);
+ if (linkage()->GetIncomingDescriptor()->ReturnCount() == 0) {
+ Emit(kArchRet, g.NoOutput());
+ } else {
+ const int ret_count = ret->op()->ValueInputCount();
+ auto value_locations = zone()->NewArray<InstructionOperand>(ret_count);
+ for (int i = 0; i < ret_count; ++i) {
+ value_locations[i] =
+ g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i),
+ linkage()->GetReturnType(i).representation());
+ }
+ Emit(kArchRet, 0, nullptr, ret_count, value_locations);
+ }
+}
+
+
+void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
+ OperandGenerator g(this);
+
+ FrameStateDescriptor* desc = GetFrameStateDescriptor(value);
+
+ InstructionOperandVector args(instruction_zone());
+ args.reserve(desc->GetTotalSize() + 1); // Include deopt id.
+
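+  // As for calls, the deoptimization id comes first, followed by the
+  // flattened value inputs of the frame state.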
+ InstructionSequence::StateId state_id =
+ sequence()->AddFrameStateDescriptor(desc);
+ args.push_back(g.TempImmediate(state_id.ToInt()));
+
+ StateObjectDeduplicator deduplicator(instruction_zone());
+
+ AddInputsToFrameStateDescriptor(desc, value, &g, &deduplicator, &args,
+ FrameStateInputKind::kAny,
+ instruction_zone());
+
+ InstructionCode opcode = kArchDeoptimize;
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ opcode |= MiscField::encode(Deoptimizer::EAGER);
+ break;
+ case DeoptimizeKind::kSoft:
+ opcode |= MiscField::encode(Deoptimizer::SOFT);
+ break;
+ }
+ Emit(opcode, 0, nullptr, args.size(), &args.front(), 0, nullptr);
+}
+
+
void InstructionSelector::VisitThrow(Node* value) {
- UNIMPLEMENTED(); // TODO(titzer)
-}
-
-
-void InstructionSelector::FillTypeVectorFromStateValues(
- ZoneVector<MachineType>* types, Node* state_values) {
- DCHECK(state_values->opcode() == IrOpcode::kStateValues);
- int count = state_values->InputCount();
- types->reserve(static_cast<size_t>(count));
- for (int i = 0; i < count; i++) {
- types->push_back(GetMachineType(state_values->InputAt(i)));
- }
+ OperandGenerator g(this);
+ Emit(kArchThrowTerminator, g.NoOutput()); // TODO(titzer)
}
FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
Node* state) {
DCHECK(state->opcode() == IrOpcode::kFrameState);
- DCHECK_EQ(5, state->InputCount());
- DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(0)->opcode());
- DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(1)->opcode());
- DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(2)->opcode());
- FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
+ DCHECK_EQ(kFrameStateInputCount, state->InputCount());
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(state);
- int parameters = state->InputAt(0)->InputCount();
- int locals = state->InputAt(1)->InputCount();
- int stack = state->InputAt(2)->InputCount();
+ int parameters = static_cast<int>(
+ StateValuesAccess(state->InputAt(kFrameStateParametersInput)).size());
+ int locals = static_cast<int>(
+ StateValuesAccess(state->InputAt(kFrameStateLocalsInput)).size());
+ int stack = static_cast<int>(
+ StateValuesAccess(state->InputAt(kFrameStateStackInput)).size());
- FrameStateDescriptor* outer_state = NULL;
- Node* outer_node = state->InputAt(4);
+ DCHECK_EQ(parameters, state_info.parameter_count());
+ DCHECK_EQ(locals, state_info.local_count());
+
+ FrameStateDescriptor* outer_state = nullptr;
+ Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
if (outer_node->opcode() == IrOpcode::kFrameState) {
outer_state = GetFrameStateDescriptor(outer_node);
}
return new (instruction_zone()) FrameStateDescriptor(
- instruction_zone(), state_info, parameters, locals, stack, outer_state);
+ instruction_zone(), state_info.type(), state_info.bailout_id(),
+ state_info.state_combine(), parameters, locals, stack,
+ state_info.shared_info(), outer_state);
}
-static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) {
- switch (input->opcode()) {
- case IrOpcode::kInt32Constant:
- case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant:
- case IrOpcode::kHeapConstant:
- return g->UseImmediate(input);
- default:
- return g->UseUnique(input);
- }
-}
-
-
-void InstructionSelector::AddFrameStateInputs(
- Node* state, InstructionOperandVector* inputs,
- FrameStateDescriptor* descriptor) {
- DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
-
- if (descriptor->outer_state() != NULL) {
- AddFrameStateInputs(state->InputAt(4), inputs, descriptor->outer_state());
- }
-
- Node* parameters = state->InputAt(0);
- Node* locals = state->InputAt(1);
- Node* stack = state->InputAt(2);
- Node* context = state->InputAt(3);
-
- DCHECK_EQ(IrOpcode::kStateValues, parameters->op()->opcode());
- DCHECK_EQ(IrOpcode::kStateValues, locals->op()->opcode());
- DCHECK_EQ(IrOpcode::kStateValues, stack->op()->opcode());
-
- DCHECK_EQ(static_cast<int>(descriptor->parameters_count()),
- parameters->InputCount());
- DCHECK_EQ(static_cast<int>(descriptor->locals_count()), locals->InputCount());
- DCHECK_EQ(static_cast<int>(descriptor->stack_count()), stack->InputCount());
-
- ZoneVector<MachineType> types(instruction_zone());
- types.reserve(descriptor->GetSize());
-
- OperandGenerator g(this);
- size_t value_index = 0;
- for (int i = 0; i < static_cast<int>(descriptor->parameters_count()); i++) {
- Node* input_node = parameters->InputAt(i);
- inputs->push_back(UseOrImmediate(&g, input_node));
- descriptor->SetType(value_index++, GetMachineType(input_node));
- }
- if (descriptor->HasContext()) {
- inputs->push_back(UseOrImmediate(&g, context));
- descriptor->SetType(value_index++, kMachAnyTagged);
- }
- for (int i = 0; i < static_cast<int>(descriptor->locals_count()); i++) {
- Node* input_node = locals->InputAt(i);
- inputs->push_back(UseOrImmediate(&g, input_node));
- descriptor->SetType(value_index++, GetMachineType(input_node));
- }
- for (int i = 0; i < static_cast<int>(descriptor->stack_count()); i++) {
- Node* input_node = stack->InputAt(i);
- inputs->push_back(UseOrImmediate(&g, input_node));
- descriptor->SetType(value_index++, GetMachineType(input_node));
- }
- DCHECK(value_index == descriptor->GetSize());
-}
-
-
-#if !V8_TURBOFAN_BACKEND
-
-#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
- void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
-MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
-#undef DECLARE_UNIMPLEMENTED_SELECTOR
-
-
-void InstructionSelector::VisitCall(Node* node) { UNIMPLEMENTED(); }
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- UNIMPLEMENTED();
-}
-
-
-// static
-MachineOperatorBuilder::Flags
-InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::Flag::kNoFlags;
-}
-
-#endif // !V8_TURBOFAN_BACKEND
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index 5e3c52f..52aea70 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -5,11 +5,13 @@
#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_INSTRUCTION_SELECTOR_H_
-#include <deque>
+#include <map>
#include "src/compiler/common-operator.h"
#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-scheduler.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -17,69 +19,91 @@
namespace compiler {
// Forward declarations.
+class BasicBlock;
struct CallBuffer; // TODO(bmeurer): Remove this.
class FlagsContinuation;
class Linkage;
+class OperandGenerator;
+struct SwitchInfo;
-typedef IntVector NodeToVregMap;
-
-class InstructionSelector FINAL {
+// This class pairs a parameter node that is going to be pushed on the call
+// stack with the machine type it has in the call descriptor of the callee.
+class PushParameter {
public:
- static const int kNodeUnmapped = -1;
+ PushParameter() : node_(nullptr), type_(MachineType::None()) {}
+ PushParameter(Node* node, MachineType type) : node_(node), type_(type) {}
+ Node* node() const { return node_; }
+ MachineType type() const { return type_; }
+
+ private:
+ Node* node_;
+ MachineType type_;
+};
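+
+// A default-constructed PushParameter (null node) serves as a placeholder
+// for stack slots that have been reserved but not yet assigned a node.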
+
+// Instruction selection generates an InstructionSequence for a given Schedule.
+class InstructionSelector final {
+ public:
// Forward declarations.
class Features;
- // TODO(dcarney): pass in vreg mapping instead of graph.
- InstructionSelector(Zone* local_zone, Graph* graph, Linkage* linkage,
- InstructionSequence* sequence, Schedule* schedule,
- SourcePositionTable* source_positions,
- Features features = SupportedFeatures());
+ enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
+
+ InstructionSelector(
+ Zone* zone, size_t node_count, Linkage* linkage,
+ InstructionSequence* sequence, Schedule* schedule,
+ SourcePositionTable* source_positions,
+ SourcePositionMode source_position_mode = kCallSourcePositions,
+ Features features = SupportedFeatures());
// Visit code for the entire graph with the included schedule.
void SelectInstructions();
+ void StartBlock(RpoNumber rpo);
+ void EndBlock(RpoNumber rpo);
+ void AddInstruction(Instruction* instr);
+
// ===========================================================================
// ============= Architecture-independent code emission methods. =============
// ===========================================================================
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- size_t temp_count = 0, InstructionOperand* *temps = NULL);
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- InstructionOperand* a, size_t temp_count = 0,
- InstructionOperand* *temps = NULL);
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- InstructionOperand* a, InstructionOperand* b,
- size_t temp_count = 0, InstructionOperand* *temps = NULL);
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- InstructionOperand* a, InstructionOperand* b,
- InstructionOperand* c, size_t temp_count = 0,
- InstructionOperand* *temps = NULL);
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- InstructionOperand* a, InstructionOperand* b,
- InstructionOperand* c, InstructionOperand* d,
- size_t temp_count = 0, InstructionOperand* *temps = NULL);
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- InstructionOperand* a, InstructionOperand* b,
- InstructionOperand* c, InstructionOperand* d,
- InstructionOperand* e, size_t temp_count = 0,
- InstructionOperand* *temps = NULL);
- Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
- InstructionOperand* a, InstructionOperand* b,
- InstructionOperand* c, InstructionOperand* d,
- InstructionOperand* e, InstructionOperand* f,
- size_t temp_count = 0, InstructionOperand* *temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ size_t temp_count = 0, InstructionOperand* temps = nullptr);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, size_t temp_count = 0,
+ InstructionOperand* temps = nullptr);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, InstructionOperand b,
+ size_t temp_count = 0, InstructionOperand* temps = nullptr);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, InstructionOperand b,
+ InstructionOperand c, size_t temp_count = 0,
+ InstructionOperand* temps = nullptr);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, InstructionOperand b,
+ InstructionOperand c, InstructionOperand d,
+ size_t temp_count = 0, InstructionOperand* temps = nullptr);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, InstructionOperand b,
+ InstructionOperand c, InstructionOperand d,
+ InstructionOperand e, size_t temp_count = 0,
+ InstructionOperand* temps = nullptr);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, InstructionOperand b,
+ InstructionOperand c, InstructionOperand d,
+ InstructionOperand e, InstructionOperand f,
+ size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, size_t output_count,
- InstructionOperand** outputs, size_t input_count,
- InstructionOperand** inputs, size_t temp_count = 0,
- InstructionOperand* *temps = NULL);
+ InstructionOperand* outputs, size_t input_count,
+ InstructionOperand* inputs, size_t temp_count = 0,
+ InstructionOperand* temps = nullptr);
Instruction* Emit(Instruction* instr);
// ===========================================================================
// ============== Architecture-independent CPU feature methods. ==============
// ===========================================================================
- class Features FINAL {
+ class Features final {
public:
Features() : bits_(0) {}
explicit Features(unsigned bits) : bits_(bits) {}
@@ -126,13 +150,17 @@
bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }
int GetVirtualRegister(const Node* node);
- // Gets the current mapping if it exists, kNodeUnmapped otherwise.
- int GetMappedVirtualRegister(const Node* node) const;
- const NodeToVregMap& GetNodeMapForTesting() const { return node_map_; }
+ const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
+
+ Isolate* isolate() const { return sequence()->isolate(); }
private:
friend class OperandGenerator;
+ void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
+ void EmitLookupSwitch(const SwitchInfo& sw,
+ InstructionOperand& value_operand);
+
// Inform the instruction selection that {node} was just defined.
void MarkAsDefined(Node* node);
@@ -140,25 +168,36 @@
// will need to generate code for it.
void MarkAsUsed(Node* node);
- // Checks if {node} is marked as double.
- bool IsDouble(const Node* node) const;
-
- // Inform the register allocator of a double result.
- void MarkAsDouble(Node* node);
-
- // Checks if {node} is marked as reference.
- bool IsReference(const Node* node) const;
-
- // Inform the register allocator of a reference result.
- void MarkAsReference(Node* node);
-
// Inform the register allocation of the representation of the value produced
// by {node}.
- void MarkAsRepresentation(MachineType rep, Node* node);
+ void MarkAsRepresentation(MachineRepresentation rep, Node* node);
+ void MarkAsWord32(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kWord32, node);
+ }
+ void MarkAsWord64(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kWord64, node);
+ }
+ void MarkAsFloat32(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kFloat32, node);
+ }
+ void MarkAsFloat64(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kFloat64, node);
+ }
+ void MarkAsReference(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kTagged, node);
+ }
// Inform the register allocation of the representation of the unallocated
// operand {op}.
- void MarkAsRepresentation(MachineType rep, InstructionOperand* op);
+ void MarkAsRepresentation(MachineRepresentation rep,
+ const InstructionOperand& op);
+
+ enum CallBufferFlag {
+ kCallCodeImmediate = 1u << 0,
+ kCallAddressImmediate = 1u << 1,
+ kCallTail = 1u << 2
+ };
+ typedef base::Flags<CallBufferFlag> CallBufferFlags;
// Initialize the call buffer with the InstructionOperands, nodes, etc,
// corresponding
@@ -166,15 +205,10 @@
  // Pass {kCallCodeImmediate} in {flags} to generate immediate operands to
  // calls of code, and {kCallAddressImmediate} for address calls.
void InitializeCallBuffer(Node* call, CallBuffer* buffer,
- bool call_code_immediate,
- bool call_address_immediate);
+ CallBufferFlags flags, int stack_param_delta = 0);
+ bool IsTailCallAddressImmediate();
FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
- void FillTypeVectorFromStateValues(ZoneVector<MachineType>* parameters,
- Node* state_values);
- void AddFrameStateInputs(Node* state, InstructionOperandVector* inputs,
- FrameStateDescriptor* descriptor);
- MachineType GetMachineType(Node* node);
// ===========================================================================
// ============= Architecture-specific graph covering methods. ===============
@@ -194,17 +228,25 @@
MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
- void VisitFinish(Node* node);
+ void VisitFinishRegion(Node* node);
+ void VisitGuard(Node* node);
void VisitParameter(Node* node);
+ void VisitIfException(Node* node);
+ void VisitOsrValue(Node* node);
void VisitPhi(Node* node);
void VisitProjection(Node* node);
void VisitConstant(Node* node);
- void VisitCall(Node* call);
+ void VisitCall(Node* call, BasicBlock* handler = nullptr);
+ void VisitTailCall(Node* call);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
- void VisitReturn(Node* value);
+ void VisitSwitch(Node* node, const SwitchInfo& sw);
+ void VisitDeoptimize(DeoptimizeKind kind, Node* value);
+ void VisitReturn(Node* ret);
void VisitThrow(Node* value);
- void VisitDeoptimize(Node* deopt);
+
+ void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
+ const CallDescriptor* descriptor, Node* node);
// ===========================================================================
@@ -220,13 +262,15 @@
Linkage* const linkage_;
InstructionSequence* const sequence_;
SourcePositionTable* const source_positions_;
+ SourcePositionMode const source_position_mode_;
Features features_;
Schedule* const schedule_;
- NodeToVregMap node_map_;
BasicBlock* current_block_;
- ZoneDeque<Instruction*> instructions_;
+ ZoneVector<Instruction*> instructions_;
BoolVector defined_;
BoolVector used_;
+ IntVector virtual_registers_;
+ InstructionScheduler* scheduler_;
};
} // namespace compiler
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index f83cdeb..383e27d 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -5,14 +5,80 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/instruction.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/state-values-utils.h"
namespace v8 {
namespace internal {
namespace compiler {
+
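+// Returns the condition that tests the same relation with the operand order
+// swapped: e.g. kSignedLessThan (a < b) commutes to kSignedGreaterThan
+// (b > a), while symmetric conditions such as kEqual map to themselves.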
+FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
+ switch (condition) {
+ case kSignedLessThan:
+ return kSignedGreaterThan;
+ case kSignedGreaterThanOrEqual:
+ return kSignedLessThanOrEqual;
+ case kSignedLessThanOrEqual:
+ return kSignedGreaterThanOrEqual;
+ case kSignedGreaterThan:
+ return kSignedLessThan;
+ case kUnsignedLessThan:
+ return kUnsignedGreaterThan;
+ case kUnsignedGreaterThanOrEqual:
+ return kUnsignedLessThanOrEqual;
+ case kUnsignedLessThanOrEqual:
+ return kUnsignedGreaterThanOrEqual;
+ case kUnsignedGreaterThan:
+ return kUnsignedLessThan;
+ case kFloatLessThanOrUnordered:
+ return kFloatGreaterThanOrUnordered;
+ case kFloatGreaterThanOrEqual:
+ return kFloatLessThanOrEqual;
+ case kFloatLessThanOrEqual:
+ return kFloatGreaterThanOrEqual;
+ case kFloatGreaterThanOrUnordered:
+ return kFloatLessThanOrUnordered;
+ case kFloatLessThan:
+ return kFloatGreaterThan;
+ case kFloatGreaterThanOrEqualOrUnordered:
+ return kFloatLessThanOrEqualOrUnordered;
+ case kFloatLessThanOrEqualOrUnordered:
+ return kFloatGreaterThanOrEqualOrUnordered;
+ case kFloatGreaterThan:
+ return kFloatLessThan;
+ case kEqual:
+ case kNotEqual:
+ case kOverflow:
+ case kNotOverflow:
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ return condition;
+ }
+ UNREACHABLE();
+ return condition;
+}
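+
+
+// Usage sketch (illustrative): a backend that exchanges the two inputs of a
+// comparison, e.g. so that an immediate ends up on the right-hand side, must
+// commute the condition to preserve semantics:
+//
+//   // "a < b" becomes "b > a" once the operands are exchanged.
+//   FlagsCondition cond = CommuteFlagsCondition(kSignedLessThan);
+//   DCHECK_EQ(kSignedGreaterThan, cond);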
+
+
+void InstructionOperand::Print(const RegisterConfiguration* config) const {
+ OFStream os(stdout);
+ PrintableInstructionOperand wrapper;
+ wrapper.register_configuration_ = config;
+ wrapper.op_ = *this;
+ os << wrapper << std::endl;
+}
+
+
+void InstructionOperand::Print() const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config);
+}
+
+
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionOperand& printable) {
- const InstructionOperand& op = *printable.op_;
+ const InstructionOperand& op = printable.op_;
const RegisterConfiguration* conf = printable.register_configuration_;
switch (op.kind()) {
case InstructionOperand::UNALLOCATED: {
@@ -25,13 +91,19 @@
case UnallocatedOperand::NONE:
return os;
case UnallocatedOperand::FIXED_REGISTER:
- return os << "(=" << conf->general_register_name(
- unalloc->fixed_register_index()) << ")";
+ return os << "(="
+ << conf->GetGeneralRegisterName(
+ unalloc->fixed_register_index())
+ << ")";
case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
- return os << "(=" << conf->double_register_name(
- unalloc->fixed_register_index()) << ")";
+ return os << "(="
+ << conf->GetDoubleRegisterName(
+ unalloc->fixed_register_index())
+ << ")";
case UnallocatedOperand::MUST_HAVE_REGISTER:
return os << "(R)";
+ case UnallocatedOperand::MUST_HAVE_SLOT:
+ return os << "(S)";
case UnallocatedOperand::SAME_AS_FIRST_INPUT:
return os << "(1)";
case UnallocatedOperand::ANY:
@@ -39,58 +111,88 @@
}
}
case InstructionOperand::CONSTANT:
- return os << "[constant:" << op.index() << "]";
- case InstructionOperand::IMMEDIATE:
- return os << "[immediate:" << op.index() << "]";
- case InstructionOperand::STACK_SLOT:
- return os << "[stack:" << op.index() << "]";
- case InstructionOperand::DOUBLE_STACK_SLOT:
- return os << "[double_stack:" << op.index() << "]";
- case InstructionOperand::REGISTER:
- return os << "[" << conf->general_register_name(op.index()) << "|R]";
- case InstructionOperand::DOUBLE_REGISTER:
- return os << "[" << conf->double_register_name(op.index()) << "|R]";
+ return os << "[constant:" << ConstantOperand::cast(op).virtual_register()
+ << "]";
+ case InstructionOperand::IMMEDIATE: {
+ auto imm = ImmediateOperand::cast(op);
+ switch (imm.type()) {
+ case ImmediateOperand::INLINE:
+ return os << "#" << imm.inline_value();
+ case ImmediateOperand::INDEXED:
+ return os << "[immediate:" << imm.indexed_value() << "]";
+ }
+ }
+ case InstructionOperand::EXPLICIT:
+ case InstructionOperand::ALLOCATED: {
+ auto allocated = LocationOperand::cast(op);
+ if (op.IsStackSlot()) {
+ os << "[stack:" << LocationOperand::cast(op).index();
+ } else if (op.IsDoubleStackSlot()) {
+ os << "[double_stack:" << LocationOperand::cast(op).index();
+ } else if (op.IsRegister()) {
+ os << "[" << LocationOperand::cast(op).GetRegister().ToString() << "|R";
+ } else {
+ DCHECK(op.IsDoubleRegister());
+ os << "[" << LocationOperand::cast(op).GetDoubleRegister().ToString()
+ << "|R";
+ }
+ if (allocated.IsExplicit()) {
+ os << "|E";
+ }
+ switch (allocated.representation()) {
+ case MachineRepresentation::kNone:
+ os << "|-";
+ break;
+ case MachineRepresentation::kBit:
+ os << "|b";
+ break;
+ case MachineRepresentation::kWord8:
+ os << "|w8";
+ break;
+ case MachineRepresentation::kWord16:
+ os << "|w16";
+ break;
+ case MachineRepresentation::kWord32:
+ os << "|w32";
+ break;
+ case MachineRepresentation::kWord64:
+ os << "|w64";
+ break;
+ case MachineRepresentation::kFloat32:
+ os << "|f32";
+ break;
+ case MachineRepresentation::kFloat64:
+ os << "|f64";
+ break;
+ case MachineRepresentation::kTagged:
+ os << "|t";
+ break;
+ }
+ return os << "]";
+ }
+ case InstructionOperand::INVALID:
+ return os << "(x)";
}
UNREACHABLE();
return os;
}
-template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
-SubKindOperand<kOperandKind, kNumCachedOperands>*
- SubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
-
-
-template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
-void SubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
- if (cache) return;
- cache = new SubKindOperand[kNumCachedOperands];
- for (int i = 0; i < kNumCachedOperands; i++) {
- cache[i].ConvertTo(kOperandKind, i);
- }
+void MoveOperands::Print(const RegisterConfiguration* config) const {
+ OFStream os(stdout);
+ PrintableInstructionOperand wrapper;
+ wrapper.register_configuration_ = config;
+ wrapper.op_ = destination();
+ os << wrapper << " = ";
+ wrapper.op_ = source();
+ os << wrapper << std::endl;
}
-template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
-void SubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
- delete[] cache;
- cache = NULL;
-}
-
-
-void InstructionOperand::SetUpCaches() {
-#define INSTRUCTION_OPERAND_SETUP(name, type, number) \
- name##Operand::SetUpCache();
- INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_SETUP)
-#undef INSTRUCTION_OPERAND_SETUP
-}
-
-
-void InstructionOperand::TearDownCaches() {
-#define INSTRUCTION_OPERAND_TEARDOWN(name, type, number) \
- name##Operand::TearDownCache();
- INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_TEARDOWN)
-#undef INSTRUCTION_OPERAND_TEARDOWN
+void MoveOperands::Print() const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config);
}
@@ -99,9 +201,8 @@
const MoveOperands& mo = *printable.move_operands_;
PrintableInstructionOperand printable_op = {printable.register_configuration_,
mo.destination()};
-
os << printable_op;
- if (!mo.source()->Equals(mo.destination())) {
+ if (!mo.source().Equals(mo.destination())) {
printable_op.op_ = mo.source();
os << " = " << printable_op;
}
@@ -110,29 +211,114 @@
bool ParallelMove::IsRedundant() const {
- for (int i = 0; i < move_operands_.length(); ++i) {
- if (!move_operands_[i].IsRedundant()) return false;
+ for (auto move : *this) {
+ if (!move->IsRedundant()) return false;
}
return true;
}
-bool GapInstruction::IsRedundant() const {
- for (int i = GapInstruction::FIRST_INNER_POSITION;
- i <= GapInstruction::LAST_INNER_POSITION; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant())
+MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
+ MoveOperands* replacement = nullptr;
+ MoveOperands* to_eliminate = nullptr;
+ for (auto curr : *this) {
+ if (curr->IsEliminated()) continue;
+ if (curr->destination().EqualsCanonicalized(move->source())) {
+ DCHECK(!replacement);
+ replacement = curr;
+ if (to_eliminate != nullptr) break;
+ } else if (curr->destination().EqualsCanonicalized(move->destination())) {
+ DCHECK(!to_eliminate);
+ to_eliminate = curr;
+ if (replacement != nullptr) break;
+ }
+ }
+ DCHECK_IMPLIES(replacement == to_eliminate, replacement == nullptr);
+ if (replacement != nullptr) move->set_source(replacement->source());
+ return to_eliminate;
+}
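+
+
+// Usage sketch (illustrative; new_move and parallel_move are hypothetical):
+// given the existing moves {B = A; D = X}, preparing to insert "D = B"
+// rewrites the new move's source to A and returns the clashing "D = X" for
+// the caller to eliminate:
+//
+//   MoveOperands* to_eliminate = parallel_move->PrepareInsertAfter(new_move);
+//   if (to_eliminate != nullptr) to_eliminate->Eliminate();
+//   parallel_move->push_back(new_move);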
+
+
+ExplicitOperand::ExplicitOperand(LocationKind kind, MachineRepresentation rep,
+ int index)
+ : LocationOperand(EXPLICIT, kind, rep, index) {
+ DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(rep),
+ Register::from_code(index).IsAllocatable());
+ DCHECK_IMPLIES(kind == REGISTER && IsFloatingPoint(rep),
+ DoubleRegister::from_code(index).IsAllocatable());
+}
+
+
+Instruction::Instruction(InstructionCode opcode)
+ : opcode_(opcode),
+ bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
+ TempCountField::encode(0) | IsCallField::encode(false)),
+ reference_map_(nullptr) {
+ parallel_moves_[0] = nullptr;
+ parallel_moves_[1] = nullptr;
+}
+
+
+Instruction::Instruction(InstructionCode opcode, size_t output_count,
+ InstructionOperand* outputs, size_t input_count,
+ InstructionOperand* inputs, size_t temp_count,
+ InstructionOperand* temps)
+ : opcode_(opcode),
+ bit_field_(OutputCountField::encode(output_count) |
+ InputCountField::encode(input_count) |
+ TempCountField::encode(temp_count) |
+ IsCallField::encode(false)),
+ reference_map_(nullptr) {
+ parallel_moves_[0] = nullptr;
+ parallel_moves_[1] = nullptr;
+ size_t offset = 0;
+ for (size_t i = 0; i < output_count; ++i) {
+ DCHECK(!outputs[i].IsInvalid());
+ operands_[offset++] = outputs[i];
+ }
+ for (size_t i = 0; i < input_count; ++i) {
+ DCHECK(!inputs[i].IsInvalid());
+ operands_[offset++] = inputs[i];
+ }
+ for (size_t i = 0; i < temp_count; ++i) {
+ DCHECK(!temps[i].IsInvalid());
+ operands_[offset++] = temps[i];
+ }
+}
+
+
+bool Instruction::AreMovesRedundant() const {
+ for (int i = Instruction::FIRST_GAP_POSITION;
+ i <= Instruction::LAST_GAP_POSITION; i++) {
+ if (parallel_moves_[i] != nullptr && !parallel_moves_[i]->IsRedundant()) {
return false;
+ }
}
return true;
}
+void Instruction::Print(const RegisterConfiguration* config) const {
+ OFStream os(stdout);
+ PrintableInstruction wrapper;
+ wrapper.instr_ = this;
+ wrapper.register_configuration_ = config;
+ os << wrapper << std::endl;
+}
+
+
+void Instruction::Print() const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config);
+}
+
+
std::ostream& operator<<(std::ostream& os,
const PrintableParallelMove& printable) {
const ParallelMove& pm = *printable.parallel_move_;
bool first = true;
- for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
- move != pm.move_operands()->end(); ++move) {
+ for (auto move : pm) {
if (move->IsEliminated()) continue;
if (!first) os << " ";
first = false;
@@ -143,42 +329,28 @@
}
-void PointerMap::RecordPointer(InstructionOperand* op, Zone* zone) {
+void ReferenceMap::RecordReference(const AllocatedOperand& op) {
// Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- pointer_operands_.Add(op, zone);
+ if (op.IsStackSlot() && LocationOperand::cast(op).index() < 0) return;
+ DCHECK(!op.IsDoubleRegister() && !op.IsDoubleStackSlot());
+ reference_operands_.push_back(op);
}
-void PointerMap::RemovePointer(InstructionOperand* op) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- for (int i = 0; i < pointer_operands_.length(); ++i) {
- if (pointer_operands_[i]->Equals(op)) {
- pointer_operands_.Remove(i);
- --i;
- }
- }
-}
-
-
-void PointerMap::RecordUntagged(InstructionOperand* op, Zone* zone) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- untagged_operands_.Add(op, zone);
-}
-
-
-std::ostream& operator<<(std::ostream& os, const PointerMap& pm) {
+std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm) {
os << "{";
- for (ZoneList<InstructionOperand*>::iterator op =
- pm.pointer_operands_.begin();
- op != pm.pointer_operands_.end(); ++op) {
- if (op != pm.pointer_operands_.begin()) os << ";";
- os << *op;
+ bool first = true;
+ PrintableInstructionOperand poi = {
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
+ InstructionOperand()};
+ for (auto& op : pm.reference_operands_) {
+ if (!first) {
+ os << ";";
+ } else {
+ first = false;
+ }
+ poi.op_ = op;
+ os << poi;
}
return os << "}";
}
@@ -248,18 +420,26 @@
return os << "unsigned less than or equal";
case kUnsignedGreaterThan:
return os << "unsigned greater than";
+ case kFloatLessThanOrUnordered:
+ return os << "less than or unordered (FP)";
+ case kFloatGreaterThanOrEqual:
+ return os << "greater than or equal (FP)";
+ case kFloatLessThanOrEqual:
+ return os << "less than or equal (FP)";
+ case kFloatGreaterThanOrUnordered:
+ return os << "greater than or unordered (FP)";
+ case kFloatLessThan:
+ return os << "less than (FP)";
+ case kFloatGreaterThanOrEqualOrUnordered:
+ return os << "greater than, equal or unordered (FP)";
+ case kFloatLessThanOrEqualOrUnordered:
+ return os << "less than, equal or unordered (FP)";
+ case kFloatGreaterThan:
+ return os << "greater than (FP)";
case kUnorderedEqual:
return os << "unordered equal";
case kUnorderedNotEqual:
return os << "unordered not equal";
- case kUnorderedLessThan:
- return os << "unordered less than";
- case kUnorderedGreaterThanOrEqual:
- return os << "unordered greater than or equal";
- case kUnorderedLessThanOrEqual:
- return os << "unordered less than or equal";
- case kUnorderedGreaterThan:
- return os << "unordered greater than";
case kOverflow:
return os << "overflow";
case kNotOverflow:
@@ -274,49 +454,42 @@
const PrintableInstruction& printable) {
const Instruction& instr = *printable.instr_;
PrintableInstructionOperand printable_op = {printable.register_configuration_,
- NULL};
+ InstructionOperand()};
+ os << "gap ";
+ for (int i = Instruction::FIRST_GAP_POSITION;
+ i <= Instruction::LAST_GAP_POSITION; i++) {
+ os << "(";
+ if (instr.parallel_moves()[i] != nullptr) {
+ PrintableParallelMove ppm = {printable.register_configuration_,
+ instr.parallel_moves()[i]};
+ os << ppm;
+ }
+ os << ") ";
+ }
+ os << "\n ";
+
if (instr.OutputCount() > 1) os << "(";
for (size_t i = 0; i < instr.OutputCount(); i++) {
if (i > 0) os << ", ";
- printable_op.op_ = instr.OutputAt(i);
+ printable_op.op_ = *instr.OutputAt(i);
os << printable_op;
}
if (instr.OutputCount() > 1) os << ") = ";
if (instr.OutputCount() == 1) os << " = ";
- if (instr.IsGapMoves()) {
- const GapInstruction* gap = GapInstruction::cast(&instr);
- os << (instr.IsBlockStart() ? " block-start" : "gap ");
- for (int i = GapInstruction::FIRST_INNER_POSITION;
- i <= GapInstruction::LAST_INNER_POSITION; i++) {
- os << "(";
- if (gap->parallel_moves_[i] != NULL) {
- PrintableParallelMove ppm = {printable.register_configuration_,
- gap->parallel_moves_[i]};
- os << ppm;
- }
- os << ") ";
- }
- } else if (instr.IsSourcePosition()) {
- const SourcePositionInstruction* pos =
- SourcePositionInstruction::cast(&instr);
- os << "position (" << pos->source_position().raw() << ")";
- } else {
- os << ArchOpcodeField::decode(instr.opcode());
- AddressingMode am = AddressingModeField::decode(instr.opcode());
- if (am != kMode_None) {
- os << " : " << AddressingModeField::decode(instr.opcode());
- }
- FlagsMode fm = FlagsModeField::decode(instr.opcode());
- if (fm != kFlags_none) {
- os << " && " << fm << " if "
- << FlagsConditionField::decode(instr.opcode());
- }
+ os << ArchOpcodeField::decode(instr.opcode());
+ AddressingMode am = AddressingModeField::decode(instr.opcode());
+ if (am != kMode_None) {
+ os << " : " << AddressingModeField::decode(instr.opcode());
+ }
+ FlagsMode fm = FlagsModeField::decode(instr.opcode());
+ if (fm != kFlags_none) {
+ os << " && " << fm << " if " << FlagsConditionField::decode(instr.opcode());
}
if (instr.InputCount() > 0) {
for (size_t i = 0; i < instr.InputCount(); i++) {
- printable_op.op_ = instr.InputAt(i);
+ printable_op.op_ = *instr.InputAt(i);
os << " " << printable_op;
}
}
@@ -324,6 +497,9 @@
}
+Constant::Constant(int32_t v) : type_(kInt32), value_(v) {}
+
+
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
switch (constant.type()) {
case Constant::kInt32:
@@ -347,26 +523,41 @@
}
-InstructionBlock::InstructionBlock(Zone* zone, BasicBlock::Id id,
- BasicBlock::RpoNumber rpo_number,
- BasicBlock::RpoNumber loop_header,
- BasicBlock::RpoNumber loop_end,
- bool deferred)
+PhiInstruction::PhiInstruction(Zone* zone, int virtual_register,
+ size_t input_count)
+ : virtual_register_(virtual_register),
+ output_(UnallocatedOperand(UnallocatedOperand::NONE, virtual_register)),
+ operands_(input_count, InstructionOperand::kInvalidVirtualRegister,
+ zone) {}
+
+
+void PhiInstruction::SetInput(size_t offset, int virtual_register) {
+ DCHECK_EQ(InstructionOperand::kInvalidVirtualRegister, operands_[offset]);
+ operands_[offset] = virtual_register;
+}
+
+
+InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
+ RpoNumber loop_header, RpoNumber loop_end,
+ bool deferred, bool handler)
: successors_(zone),
predecessors_(zone),
phis_(zone),
- id_(id),
ao_number_(rpo_number),
rpo_number_(rpo_number),
loop_header_(loop_header),
loop_end_(loop_end),
code_start_(-1),
code_end_(-1),
- deferred_(deferred) {}
+ deferred_(deferred),
+ handler_(handler),
+ needs_frame_(false),
+ must_construct_frame_(false),
+ must_deconstruct_frame_(false),
+ last_deferred_(RpoNumber::Invalid()) {}
-size_t InstructionBlock::PredecessorIndexOf(
- BasicBlock::RpoNumber rpo_number) const {
+size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
size_t j = 0;
for (InstructionBlock::Predecessors::const_iterator i = predecessors_.begin();
i != predecessors_.end(); ++i, ++j) {
@@ -376,33 +567,33 @@
}
-static BasicBlock::RpoNumber GetRpo(BasicBlock* block) {
- if (block == NULL) return BasicBlock::RpoNumber::Invalid();
- return block->GetRpoNumber();
+static RpoNumber GetRpo(const BasicBlock* block) {
+ if (block == nullptr) return RpoNumber::Invalid();
+ return RpoNumber::FromInt(block->rpo_number());
}
-static BasicBlock::RpoNumber GetLoopEndRpo(const BasicBlock* block) {
- if (!block->IsLoopHeader()) return BasicBlock::RpoNumber::Invalid();
- return block->loop_end()->GetRpoNumber();
+static RpoNumber GetLoopEndRpo(const BasicBlock* block) {
+ if (!block->IsLoopHeader()) return RpoNumber::Invalid();
+ return RpoNumber::FromInt(block->loop_end()->rpo_number());
}
static InstructionBlock* InstructionBlockFor(Zone* zone,
const BasicBlock* block) {
- InstructionBlock* instr_block = new (zone) InstructionBlock(
- zone, block->id(), block->GetRpoNumber(), GetRpo(block->loop_header()),
- GetLoopEndRpo(block), block->deferred());
+ bool is_handler =
+ !block->empty() && block->front()->opcode() == IrOpcode::kIfException;
+ InstructionBlock* instr_block = new (zone)
+ InstructionBlock(zone, GetRpo(block), GetRpo(block->loop_header()),
+ GetLoopEndRpo(block), block->deferred(), is_handler);
  // Map successors and predecessors.
instr_block->successors().reserve(block->SuccessorCount());
- for (auto it = block->successors_begin(); it != block->successors_end();
- ++it) {
- instr_block->successors().push_back((*it)->GetRpoNumber());
+ for (BasicBlock* successor : block->successors()) {
+ instr_block->successors().push_back(GetRpo(successor));
}
instr_block->predecessors().reserve(block->PredecessorCount());
- for (auto it = block->predecessors_begin(); it != block->predecessors_end();
- ++it) {
- instr_block->predecessors().push_back((*it)->GetRpoNumber());
+ for (BasicBlock* predecessor : block->predecessors()) {
+ instr_block->predecessors().push_back(GetRpo(predecessor));
}
return instr_block;
}
@@ -412,12 +603,12 @@
Zone* zone, const Schedule* schedule) {
InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
new (blocks) InstructionBlocks(
- static_cast<int>(schedule->rpo_order()->size()), NULL, zone);
+ static_cast<int>(schedule->rpo_order()->size()), nullptr, zone);
size_t rpo_number = 0;
for (BasicBlockVector::const_iterator it = schedule->rpo_order()->begin();
it != schedule->rpo_order()->end(); ++it, ++rpo_number) {
- DCHECK_EQ(NULL, (*blocks)[rpo_number]);
- DCHECK((*it)->GetRpoNumber().ToSize() == rpo_number);
+ DCHECK(!(*blocks)[rpo_number]);
+ DCHECK(GetRpo(*it).ToSize() == rpo_number);
(*blocks)[rpo_number] = InstructionBlockFor(zone, *it);
}
ComputeAssemblyOrder(blocks);
@@ -429,86 +620,95 @@
int ao = 0;
for (auto const block : *blocks) {
if (!block->IsDeferred()) {
- block->set_ao_number(BasicBlock::RpoNumber::FromInt(ao++));
+ block->set_ao_number(RpoNumber::FromInt(ao++));
}
}
for (auto const block : *blocks) {
if (block->IsDeferred()) {
- block->set_ao_number(BasicBlock::RpoNumber::FromInt(ao++));
+ block->set_ao_number(RpoNumber::FromInt(ao++));
}
}
}
-InstructionSequence::InstructionSequence(Zone* instruction_zone,
+InstructionSequence::InstructionSequence(Isolate* isolate,
+ Zone* instruction_zone,
InstructionBlocks* instruction_blocks)
- : zone_(instruction_zone),
+ : isolate_(isolate),
+ zone_(instruction_zone),
instruction_blocks_(instruction_blocks),
+ source_positions_(zone()),
block_starts_(zone()),
constants_(ConstantMap::key_compare(),
ConstantMap::allocator_type(zone())),
immediates_(zone()),
instructions_(zone()),
next_virtual_register_(0),
- pointer_maps_(zone()),
- doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
- references_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
+ reference_maps_(zone()),
+ representations_(zone()),
deoptimization_entries_(zone()) {
block_starts_.reserve(instruction_blocks_->size());
}
-BlockStartInstruction* InstructionSequence::GetBlockStart(
- BasicBlock::RpoNumber rpo) const {
- const InstructionBlock* block = InstructionBlockAt(rpo);
- return BlockStartInstruction::cast(InstructionAt(block->code_start()));
+int InstructionSequence::NextVirtualRegister() {
+ int virtual_register = next_virtual_register_++;
+ CHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
+ return virtual_register;
}
-void InstructionSequence::StartBlock(BasicBlock::RpoNumber rpo) {
+Instruction* InstructionSequence::GetBlockStart(RpoNumber rpo) const {
+ const InstructionBlock* block = InstructionBlockAt(rpo);
+ return InstructionAt(block->code_start());
+}
+
+
+void InstructionSequence::StartBlock(RpoNumber rpo) {
DCHECK(block_starts_.size() == rpo.ToSize());
InstructionBlock* block = InstructionBlockAt(rpo);
int code_start = static_cast<int>(instructions_.size());
block->set_code_start(code_start);
block_starts_.push_back(code_start);
- BlockStartInstruction* block_start = BlockStartInstruction::New(zone());
- AddInstruction(block_start);
}
-void InstructionSequence::EndBlock(BasicBlock::RpoNumber rpo) {
+void InstructionSequence::EndBlock(RpoNumber rpo) {
int end = static_cast<int>(instructions_.size());
InstructionBlock* block = InstructionBlockAt(rpo);
+ if (block->code_start() == end) { // Empty block. Insert a nop.
+ AddInstruction(Instruction::New(zone(), kArchNop));
+ end = static_cast<int>(instructions_.size());
+ }
DCHECK(block->code_start() >= 0 && block->code_start() < end);
block->set_code_end(end);
}
int InstructionSequence::AddInstruction(Instruction* instr) {
- // TODO(titzer): the order of these gaps is a holdover from Lithium.
- GapInstruction* gap = GapInstruction::New(zone());
- if (instr->IsControl()) instructions_.push_back(gap);
int index = static_cast<int>(instructions_.size());
instructions_.push_back(instr);
- if (!instr->IsControl()) instructions_.push_back(gap);
- if (instr->NeedsPointerMap()) {
- DCHECK(instr->pointer_map() == NULL);
- PointerMap* pointer_map = new (zone()) PointerMap(zone());
- pointer_map->set_instruction_position(index);
- instr->set_pointer_map(pointer_map);
- pointer_maps_.push_back(pointer_map);
+ if (instr->NeedsReferenceMap()) {
+ DCHECK(instr->reference_map() == nullptr);
+ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+ reference_map->set_instruction_position(index);
+ instr->set_reference_map(reference_map);
+ reference_maps_.push_back(reference_map);
}
return index;
}
-const InstructionBlock* InstructionSequence::GetInstructionBlock(
+InstructionBlock* InstructionSequence::GetInstructionBlock(
int instruction_index) const {
DCHECK(instruction_blocks_->size() == block_starts_.size());
auto begin = block_starts_.begin();
- auto end = std::lower_bound(begin, block_starts_.end(), instruction_index,
- std::less_equal<int>());
- size_t index = std::distance(begin, end) - 1;
+ auto end = std::lower_bound(begin, block_starts_.end(), instruction_index);
+  // Postcondition of std::lower_bound:
+ DCHECK(end == block_starts_.end() || *end >= instruction_index);
+ if (end == block_starts_.end() || *end > instruction_index) --end;
+ DCHECK(*end <= instruction_index);
+ size_t index = std::distance(begin, end);
auto block = instruction_blocks_->at(index);
DCHECK(block->code_start() <= instruction_index &&
instruction_index < block->code_end());
@@ -516,30 +716,48 @@
}
-bool InstructionSequence::IsReference(int virtual_register) const {
- return references_.find(virtual_register) != references_.end();
+static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ return InstructionSequence::DefaultRepresentation();
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kTagged:
+ return rep;
+ case MachineRepresentation::kNone:
+ break;
+ }
+ UNREACHABLE();
+ return MachineRepresentation::kNone;
}
-bool InstructionSequence::IsDouble(int virtual_register) const {
- return doubles_.find(virtual_register) != doubles_.end();
+MachineRepresentation InstructionSequence::GetRepresentation(
+ int virtual_register) const {
+ DCHECK_LE(0, virtual_register);
+ DCHECK_LT(virtual_register, VirtualRegisterCount());
+ if (virtual_register >= static_cast<int>(representations_.size())) {
+ return DefaultRepresentation();
+ }
+ return representations_[virtual_register];
}
-void InstructionSequence::MarkAsReference(int virtual_register) {
- references_.insert(virtual_register);
-}
-
-
-void InstructionSequence::MarkAsDouble(int virtual_register) {
- doubles_.insert(virtual_register);
-}
-
-
-void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
- InstructionOperand* to) {
- GapAt(index)->GetOrCreateParallelMove(GapInstruction::START, zone())->AddMove(
- from, to, zone());
+void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
+ int virtual_register) {
+ DCHECK_LE(0, virtual_register);
+ DCHECK_LT(virtual_register, VirtualRegisterCount());
+ if (virtual_register >= static_cast<int>(representations_.size())) {
+ representations_.resize(VirtualRegisterCount(), DefaultRepresentation());
+ }
+ rep = FilterRepresentation(rep);
+ DCHECK_IMPLIES(representations_[virtual_register] != rep,
+ representations_[virtual_register] == DefaultRepresentation());
+ representations_[virtual_register] = rep;
}
@@ -561,23 +779,66 @@
}
+RpoNumber InstructionSequence::InputRpo(Instruction* instr, size_t index) {
+ InstructionOperand* operand = instr->InputAt(index);
+ Constant constant =
+ operand->IsImmediate()
+ ? GetImmediate(ImmediateOperand::cast(operand))
+ : GetConstant(ConstantOperand::cast(operand)->virtual_register());
+ return constant.ToRpoNumber();
+}
+
+
+bool InstructionSequence::GetSourcePosition(const Instruction* instr,
+ SourcePosition* result) const {
+ auto it = source_positions_.find(instr);
+ if (it == source_positions_.end()) return false;
+ *result = it->second;
+ return true;
+}
+
+
+void InstructionSequence::SetSourcePosition(const Instruction* instr,
+ SourcePosition value) {
+ source_positions_.insert(std::make_pair(instr, value));
+}
+
+
+void InstructionSequence::Print(const RegisterConfiguration* config) const {
+ OFStream os(stdout);
+ PrintableInstructionSequence wrapper;
+ wrapper.register_configuration_ = config;
+ wrapper.sequence_ = this;
+ os << wrapper << std::endl;
+}
+
+
+void InstructionSequence::Print() const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config);
+}
+
+
FrameStateDescriptor::FrameStateDescriptor(
- Zone* zone, const FrameStateCallInfo& state_info, size_t parameters_count,
- size_t locals_count, size_t stack_count, FrameStateDescriptor* outer_state)
- : type_(state_info.type()),
- bailout_id_(state_info.bailout_id()),
- frame_state_combine_(state_info.state_combine()),
+ Zone* zone, FrameStateType type, BailoutId bailout_id,
+ OutputFrameStateCombine state_combine, size_t parameters_count,
+ size_t locals_count, size_t stack_count,
+ MaybeHandle<SharedFunctionInfo> shared_info,
+ FrameStateDescriptor* outer_state)
+ : type_(type),
+ bailout_id_(bailout_id),
+ frame_state_combine_(state_combine),
parameters_count_(parameters_count),
locals_count_(locals_count),
stack_count_(stack_count),
- types_(zone),
- outer_state_(outer_state),
- jsfunction_(state_info.jsfunction()) {
- types_.resize(GetSize(), kMachNone);
-}
+ values_(zone),
+ shared_info_(shared_info),
+ outer_state_(outer_state) {}
+
size_t FrameStateDescriptor::GetSize(OutputFrameStateCombine combine) const {
- size_t size = parameters_count() + locals_count() + stack_count() +
+ size_t size = 1 + parameters_count() + locals_count() + stack_count() +
(HasContext() ? 1 : 0);
switch (combine.kind()) {
case OutputFrameStateCombine::kPushOutput:
@@ -592,7 +853,7 @@
size_t FrameStateDescriptor::GetTotalSize() const {
size_t total_size = 0;
- for (const FrameStateDescriptor* iter = this; iter != NULL;
+ for (const FrameStateDescriptor* iter = this; iter != nullptr;
iter = iter->outer_state_) {
total_size += iter->GetSize();
}
@@ -602,7 +863,7 @@
size_t FrameStateDescriptor::GetFrameCount() const {
size_t count = 0;
- for (const FrameStateDescriptor* iter = this; iter != NULL;
+ for (const FrameStateDescriptor* iter = this; iter != nullptr;
iter = iter->outer_state_) {
++count;
}
@@ -612,9 +873,9 @@
size_t FrameStateDescriptor::GetJSFrameCount() const {
size_t count = 0;
- for (const FrameStateDescriptor* iter = this; iter != NULL;
+ for (const FrameStateDescriptor* iter = this; iter != nullptr;
iter = iter->outer_state_) {
- if (iter->type_ == JS_FRAME) {
+ if (FrameStateFunctionInfo::IsJSFunctionType(iter->type_)) {
++count;
}
}
@@ -622,14 +883,8 @@
}
-MachineType FrameStateDescriptor::GetType(size_t index) const {
- return types_[index];
-}
-
-
-void FrameStateDescriptor::SetType(size_t index, MachineType type) {
- DCHECK(index < GetSize());
- types_[index] = type;
+std::ostream& operator<<(std::ostream& os, const RpoNumber& rpo) {
+ return os << rpo.ToSize();
}
@@ -646,14 +901,16 @@
os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
}
for (int i = 0; i < code.InstructionBlockCount(); i++) {
- BasicBlock::RpoNumber rpo = BasicBlock::RpoNumber::FromInt(i);
+ RpoNumber rpo = RpoNumber::FromInt(i);
const InstructionBlock* block = code.InstructionBlockAt(rpo);
CHECK(block->rpo_number() == rpo);
- os << "RPO#" << block->rpo_number();
+ os << "B" << block->rpo_number();
os << ": AO#" << block->ao_number();
- os << ": B" << block->id();
if (block->IsDeferred()) os << " (deferred)";
+ if (!block->needs_frame()) os << " (no frame)";
+ if (block->must_construct_frame()) os << " (construct frame)";
+ if (block->must_deconstruct_frame()) os << " (deconstruct frame)";
if (block->IsLoopHeader()) {
os << " loop blocks: [" << block->rpo_number() << ", "
<< block->loop_end() << ")";
@@ -662,8 +919,7 @@
<< block->code_end() << ")\n predecessors:";
for (auto pred : block->predecessors()) {
- const InstructionBlock* pred_block = code.InstructionBlockAt(pred);
- os << " B" << pred_block->id();
+ os << " B" << pred.ToInt();
}
os << "\n";
@@ -671,9 +927,8 @@
PrintableInstructionOperand printable_op = {
printable.register_configuration_, phi->output()};
os << " phi: " << printable_op << " =";
- for (auto input : phi->inputs()) {
- printable_op.op_ = input;
- os << " " << printable_op;
+ for (auto input : phi->operands()) {
+ os << " v" << input;
}
os << "\n";
}
@@ -690,8 +945,7 @@
}
for (auto succ : block->successors()) {
- const InstructionBlock* succ_block = code.InstructionBlockAt(succ);
- os << " B" << succ_block->id();
+ os << " B" << succ.ToInt();
}
os << "\n";
}
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index daa83f2..8a6a0ae 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -14,80 +14,133 @@
#include "src/compiler/frame.h"
#include "src/compiler/instruction-codes.h"
#include "src/compiler/opcodes.h"
-#include "src/compiler/register-configuration.h"
-#include "src/compiler/schedule.h"
#include "src/compiler/source-position.h"
+#include "src/macro-assembler.h"
+#include "src/register-configuration.h"
#include "src/zone-allocator.h"
namespace v8 {
namespace internal {
namespace compiler {
-// A couple of reserved opcodes are used for internal use.
-const InstructionCode kGapInstruction = -1;
-const InstructionCode kBlockStartInstruction = -2;
-const InstructionCode kSourcePositionInstruction = -3;
+// Forward declarations.
+class Schedule;
-#define INSTRUCTION_OPERAND_LIST(V) \
- V(Constant, CONSTANT, 0) \
- V(Immediate, IMMEDIATE, 0) \
- V(StackSlot, STACK_SLOT, 128) \
- V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
- V(Register, REGISTER, RegisterConfiguration::kMaxGeneralRegisters) \
- V(DoubleRegister, DOUBLE_REGISTER, RegisterConfiguration::kMaxDoubleRegisters)
-class InstructionOperand : public ZoneObject {
+class InstructionOperand {
public:
- enum Kind {
- UNALLOCATED,
- CONSTANT,
- IMMEDIATE,
- STACK_SLOT,
- DOUBLE_STACK_SLOT,
- REGISTER,
- DOUBLE_REGISTER
- };
+ static const int kInvalidVirtualRegister = -1;
- InstructionOperand(Kind kind, int index) { ConvertTo(kind, index); }
+ // TODO(dcarney): recover bit. INVALID can be represented as UNALLOCATED with
+ // kInvalidVirtualRegister and some DCHECKS.
+ enum Kind { INVALID, UNALLOCATED, CONSTANT, IMMEDIATE, EXPLICIT, ALLOCATED };
+
+ InstructionOperand() : InstructionOperand(INVALID) {}
Kind kind() const { return KindField::decode(value_); }
- int index() const { return static_cast<int>(value_) >> KindField::kSize; }
-#define INSTRUCTION_OPERAND_PREDICATE(name, type, number) \
+
+#define INSTRUCTION_OPERAND_PREDICATE(name, type) \
bool Is##name() const { return kind() == type; }
- INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_PREDICATE)
- INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
+ INSTRUCTION_OPERAND_PREDICATE(Invalid, INVALID)
+  // UnallocatedOperands are placeholder operands created before register
+  // allocation. They are later assigned registers and become AllocatedOperands.
+ INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
+ // Constant operands participate in register allocation. They are allocated to
+ // registers but have a special "spilling" behavior. When a ConstantOperand
+ // value must be rematerialized, it is loaded from an immediate constant
+  // rather than from a spill slot.
+ INSTRUCTION_OPERAND_PREDICATE(Constant, CONSTANT)
+ // ImmediateOperands do not participate in register allocation and are only
+ // embedded directly in instructions, e.g. small integers and on some
+ // platforms Objects.
+ INSTRUCTION_OPERAND_PREDICATE(Immediate, IMMEDIATE)
+ // ExplicitOperands do not participate in register allocation. They are
+ // created by the instruction selector for direct access to registers and
+ // stack slots, completely bypassing the register allocator. They are never
+  // associated with a virtual register.
+ INSTRUCTION_OPERAND_PREDICATE(Explicit, EXPLICIT)
+ // AllocatedOperands are registers or stack slots that are assigned by the
+ // register allocator and are always associated with a virtual register.
+ INSTRUCTION_OPERAND_PREDICATE(Allocated, ALLOCATED)
#undef INSTRUCTION_OPERAND_PREDICATE
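+
+  // Lifecycle sketch (illustrative; vreg and reg_code stand for a virtual
+  // register id and an architecture register code): the instruction selector
+  // emits UnallocatedOperands, which the register allocator later replaces
+  // with AllocatedOperands, e.g.
+  //
+  //   UnallocatedOperand before(UnallocatedOperand::MUST_HAVE_REGISTER, vreg);
+  //   AllocatedOperand after(LocationOperand::REGISTER,
+  //                          MachineRepresentation::kWord32, reg_code);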
- bool Equals(const InstructionOperand* other) const {
- return value_ == other->value_;
+
+ inline bool IsAnyRegister() const;
+ inline bool IsRegister() const;
+ inline bool IsDoubleRegister() const;
+ inline bool IsStackSlot() const;
+ inline bool IsDoubleStackSlot() const;
+
+ template <typename SubKindOperand>
+ static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
+ void* buffer = zone->New(sizeof(op));
+ return new (buffer) SubKindOperand(op);
}
- void ConvertTo(Kind kind, int index) {
- if (kind == REGISTER || kind == DOUBLE_REGISTER) DCHECK(index >= 0);
- value_ = KindField::encode(kind);
- value_ |= bit_cast<unsigned>(index << KindField::kSize);
- DCHECK(this->index() == index);
+ static void ReplaceWith(InstructionOperand* dest,
+ const InstructionOperand* src) {
+ *dest = *src;
}
- // Calls SetUpCache()/TearDownCache() for each subclass.
- static void SetUpCaches();
- static void TearDownCaches();
+ bool Equals(const InstructionOperand& that) const {
+ return this->value_ == that.value_;
+ }
+
+ bool Compare(const InstructionOperand& that) const {
+ return this->value_ < that.value_;
+ }
+
+ bool EqualsCanonicalized(const InstructionOperand& that) const {
+ return this->GetCanonicalizedValue() == that.GetCanonicalizedValue();
+ }
+
+ bool CompareCanonicalized(const InstructionOperand& that) const {
+ return this->GetCanonicalizedValue() < that.GetCanonicalizedValue();
+ }
+
+ void Print(const RegisterConfiguration* config) const;
+ void Print() const;
protected:
- typedef BitField64<Kind, 0, 3> KindField;
+ explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}
+
+ inline uint64_t GetCanonicalizedValue() const;
+
+ class KindField : public BitField64<Kind, 0, 3> {};
uint64_t value_;
};
-typedef ZoneVector<InstructionOperand*> InstructionOperandVector;
+
+typedef ZoneVector<InstructionOperand> InstructionOperandVector;
+
struct PrintableInstructionOperand {
const RegisterConfiguration* register_configuration_;
- const InstructionOperand* op_;
+ InstructionOperand op_;
};
+
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionOperand& op);
+
+#define INSTRUCTION_OPERAND_CASTS(OperandType, OperandKind) \
+ \
+ static OperandType* cast(InstructionOperand* op) { \
+ DCHECK_EQ(OperandKind, op->kind()); \
+ return static_cast<OperandType*>(op); \
+ } \
+ \
+ static const OperandType* cast(const InstructionOperand* op) { \
+ DCHECK_EQ(OperandKind, op->kind()); \
+ return static_cast<const OperandType*>(op); \
+ } \
+ \
+ static OperandType cast(const InstructionOperand& op) { \
+ DCHECK_EQ(OperandKind, op.kind()); \
+ return *static_cast<const OperandType*>(&op); \
+ }
+
class UnallocatedOperand : public InstructionOperand {
public:
enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY };
@@ -98,6 +151,7 @@
FIXED_REGISTER,
FIXED_DOUBLE_REGISTER,
MUST_HAVE_REGISTER,
+ MUST_HAVE_SLOT,
SAME_AS_FIRST_INPUT
};
@@ -115,96 +169,44 @@
USED_AT_END
};
- explicit UnallocatedOperand(ExtendedPolicy policy)
- : InstructionOperand(UNALLOCATED, 0) {
- value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
+ UnallocatedOperand(ExtendedPolicy policy, int virtual_register)
+ : UnallocatedOperand(virtual_register) {
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(USED_AT_END);
}
- UnallocatedOperand(BasicPolicy policy, int index)
- : InstructionOperand(UNALLOCATED, 0) {
+ UnallocatedOperand(BasicPolicy policy, int index, int virtual_register)
+ : UnallocatedOperand(virtual_register) {
DCHECK(policy == FIXED_SLOT);
- value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
value_ |= BasicPolicyField::encode(policy);
value_ |= static_cast<int64_t>(index) << FixedSlotIndexField::kShift;
DCHECK(this->fixed_slot_index() == index);
}
- UnallocatedOperand(ExtendedPolicy policy, int index)
- : InstructionOperand(UNALLOCATED, 0) {
+ UnallocatedOperand(ExtendedPolicy policy, int index, int virtual_register)
+ : UnallocatedOperand(virtual_register) {
DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
- value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(USED_AT_END);
value_ |= FixedRegisterField::encode(index);
}
- UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime)
- : InstructionOperand(UNALLOCATED, 0) {
- value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
+ UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime,
+ int virtual_register)
+ : UnallocatedOperand(virtual_register) {
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(lifetime);
}
- UnallocatedOperand* CopyUnconstrained(Zone* zone) {
- UnallocatedOperand* result = new (zone) UnallocatedOperand(ANY);
- result->set_virtual_register(virtual_register());
- return result;
+ UnallocatedOperand(int reg_id, int slot_id, int virtual_register)
+ : UnallocatedOperand(FIXED_REGISTER, reg_id, virtual_register) {
+ value_ |= HasSecondaryStorageField::encode(true);
+ value_ |= SecondaryStorageField::encode(slot_id);
}
- static const UnallocatedOperand* cast(const InstructionOperand* op) {
- DCHECK(op->IsUnallocated());
- return static_cast<const UnallocatedOperand*>(op);
- }
-
- static UnallocatedOperand* cast(InstructionOperand* op) {
- DCHECK(op->IsUnallocated());
- return static_cast<UnallocatedOperand*>(op);
- }
-
- // The encoding used for UnallocatedOperand operands depends on the policy
- // that is
- // stored within the operand. The FIXED_SLOT policy uses a compact encoding
- // because it accommodates a larger pay-load.
- //
- // For FIXED_SLOT policy:
- // +------------------------------------------+
- // | slot_index | vreg | 0 | 001 |
- // +------------------------------------------+
- //
- // For all other (extended) policies:
- // +------------------------------------------+
- // | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime
- // +------------------------------------------+ P ... Policy
- //
- // The slot index is a signed value which requires us to decode it manually
- // instead of using the BitField64 utility class.
-
- // The superclass has a KindField.
- STATIC_ASSERT(KindField::kSize == 3);
-
- // BitFields for all unallocated operands.
- class BasicPolicyField : public BitField64<BasicPolicy, 3, 1> {};
- class VirtualRegisterField : public BitField64<unsigned, 4, 30> {};
-
- // BitFields specific to BasicPolicy::FIXED_SLOT.
- class FixedSlotIndexField : public BitField64<int, 34, 30> {};
-
- // BitFields specific to BasicPolicy::EXTENDED_POLICY.
- class ExtendedPolicyField : public BitField64<ExtendedPolicy, 34, 3> {};
- class LifetimeField : public BitField64<Lifetime, 37, 1> {};
- class FixedRegisterField : public BitField64<int, 38, 6> {};
-
- static const int kInvalidVirtualRegister = VirtualRegisterField::kMax;
- static const int kMaxVirtualRegisters = VirtualRegisterField::kMax;
- static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
- static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
- static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
-
// Predicates for the operand policy.
bool HasAnyPolicy() const {
return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
@@ -218,6 +220,10 @@
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == MUST_HAVE_REGISTER;
}
+ bool HasSlotPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == MUST_HAVE_SLOT;
+ }
bool HasSameAsInputPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == SAME_AS_FIRST_INPUT;
@@ -231,9 +237,21 @@
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == FIXED_DOUBLE_REGISTER;
}
+ bool HasSecondaryStorage() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_REGISTER &&
+ HasSecondaryStorageField::decode(value_);
+ }
+ int GetSecondaryStorage() const {
+ DCHECK(HasSecondaryStorage());
+ return SecondaryStorageField::decode(value_);
+ }
// [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
- BasicPolicy basic_policy() const { return BasicPolicyField::decode(value_); }
+ BasicPolicy basic_policy() const {
+ DCHECK_EQ(UNALLOCATED, kind());
+ return BasicPolicyField::decode(value_);
+ }
// [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
ExtendedPolicy extended_policy() const {
@@ -244,7 +262,7 @@
// [fixed_slot_index]: Only for FIXED_SLOT.
int fixed_slot_index() const {
DCHECK(HasFixedSlotPolicy());
- return static_cast<int>(bit_cast<int64_t>(value_) >>
+ return static_cast<int>(static_cast<int64_t>(value_) >>
FixedSlotIndexField::kShift);
}
@@ -255,9 +273,15 @@
}
// [virtual_register]: The virtual register ID for this operand.
- int virtual_register() const { return VirtualRegisterField::decode(value_); }
- void set_virtual_register(unsigned id) {
- value_ = VirtualRegisterField::update(value_, id);
+ int32_t virtual_register() const {
+ DCHECK_EQ(UNALLOCATED, kind());
+ return static_cast<int32_t>(VirtualRegisterField::decode(value_));
+ }
+
+ // TODO(dcarney): remove this.
+ void set_virtual_register(int32_t id) {
+ DCHECK_EQ(UNALLOCATED, kind());
+ value_ = VirtualRegisterField::update(value_, static_cast<uint32_t>(id));
}
// [lifetime]: Only for non-FIXED_SLOT.
@@ -265,46 +289,330 @@
DCHECK(basic_policy() == EXTENDED_POLICY);
return LifetimeField::decode(value_) == USED_AT_START;
}
+
+ INSTRUCTION_OPERAND_CASTS(UnallocatedOperand, UNALLOCATED);
+
+  // The encoding used for UnallocatedOperand operands depends on the policy
+  // that is stored within the operand. The FIXED_SLOT policy uses a compact
+  // encoding because it accommodates a larger payload.
+ //
+ // For FIXED_SLOT policy:
+ // +------------------------------------------------+
+ // | slot_index | 0 | virtual_register | 001 |
+ // +------------------------------------------------+
+ //
+ // For all other (extended) policies:
+ // +-----------------------------------------------------+
+ // | reg_index | L | PPP | 1 | virtual_register | 001 |
+ // +-----------------------------------------------------+
+ // L ... Lifetime
+ // P ... Policy
+ //
+ // The slot index is a signed value which requires us to decode it manually
+ // instead of using the BitField utility class.
+
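+  // Worked example (illustrative): for a FIXED_SLOT operand, bits 0..2 hold
+  // 001 (UNALLOCATED), bits 3..34 the virtual register, bit 35 the basic
+  // policy, and bits 36..63 the sign-extended slot index. Decoding therefore
+  // shifts the whole 64-bit value as a signed integer so that negative slot
+  // indices survive:
+  //
+  //   int index = static_cast<int>(static_cast<int64_t>(value_) >>
+  //                                FixedSlotIndexField::kShift);
+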
+ STATIC_ASSERT(KindField::kSize == 3);
+
+ class VirtualRegisterField : public BitField64<uint32_t, 3, 32> {};
+
+ // BitFields for all unallocated operands.
+ class BasicPolicyField : public BitField64<BasicPolicy, 35, 1> {};
+
+ // BitFields specific to BasicPolicy::FIXED_SLOT.
+ class FixedSlotIndexField : public BitField64<int, 36, 28> {};
+
+ // BitFields specific to BasicPolicy::EXTENDED_POLICY.
+ class ExtendedPolicyField : public BitField64<ExtendedPolicy, 36, 3> {};
+ class LifetimeField : public BitField64<Lifetime, 39, 1> {};
+ class HasSecondaryStorageField : public BitField64<bool, 40, 1> {};
+ class FixedRegisterField : public BitField64<int, 41, 6> {};
+ class SecondaryStorageField : public BitField64<int, 47, 3> {};
+
+ private:
+ explicit UnallocatedOperand(int virtual_register)
+ : InstructionOperand(UNALLOCATED) {
+ value_ |=
+ VirtualRegisterField::encode(static_cast<uint32_t>(virtual_register));
+ }
};
-class MoveOperands FINAL {
+class ConstantOperand : public InstructionOperand {
public:
- MoveOperands(InstructionOperand* source, InstructionOperand* destination)
- : source_(source), destination_(destination) {}
+ explicit ConstantOperand(int virtual_register)
+ : InstructionOperand(CONSTANT) {
+ value_ |=
+ VirtualRegisterField::encode(static_cast<uint32_t>(virtual_register));
+ }
- InstructionOperand* source() const { return source_; }
- void set_source(InstructionOperand* operand) { source_ = operand; }
+ int32_t virtual_register() const {
+ return static_cast<int32_t>(VirtualRegisterField::decode(value_));
+ }
- InstructionOperand* destination() const { return destination_; }
- void set_destination(InstructionOperand* operand) { destination_ = operand; }
+ static ConstantOperand* New(Zone* zone, int virtual_register) {
+ return InstructionOperand::New(zone, ConstantOperand(virtual_register));
+ }
+
+ INSTRUCTION_OPERAND_CASTS(ConstantOperand, CONSTANT);
+
+ STATIC_ASSERT(KindField::kSize == 3);
+ class VirtualRegisterField : public BitField64<uint32_t, 3, 32> {};
+};
+
+
+class ImmediateOperand : public InstructionOperand {
+ public:
+ enum ImmediateType { INLINE, INDEXED };
+
+ explicit ImmediateOperand(ImmediateType type, int32_t value)
+ : InstructionOperand(IMMEDIATE) {
+ value_ |= TypeField::encode(type);
+ value_ |= static_cast<int64_t>(value) << ValueField::kShift;
+ }
+
+ ImmediateType type() const { return TypeField::decode(value_); }
+
+ int32_t inline_value() const {
+ DCHECK_EQ(INLINE, type());
+ return static_cast<int64_t>(value_) >> ValueField::kShift;
+ }
+
+ int32_t indexed_value() const {
+ DCHECK_EQ(INDEXED, type());
+ return static_cast<int64_t>(value_) >> ValueField::kShift;
+ }
+
+ static ImmediateOperand* New(Zone* zone, ImmediateType type, int32_t value) {
+ return InstructionOperand::New(zone, ImmediateOperand(type, value));
+ }
+
+ INSTRUCTION_OPERAND_CASTS(ImmediateOperand, IMMEDIATE);
+
+ STATIC_ASSERT(KindField::kSize == 3);
+ class TypeField : public BitField64<ImmediateType, 3, 1> {};
+ class ValueField : public BitField64<int32_t, 32, 32> {};
+};
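+
+
+// Usage sketch (illustrative): small values are encoded INLINE in the operand
+// word itself, while values stored in the InstructionSequence's immediates
+// table are referenced through an INDEXED position:
+//
+//   ImmediateOperand imm(ImmediateOperand::INLINE, 42);
+//   DCHECK_EQ(42, imm.inline_value());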
+
+
+class LocationOperand : public InstructionOperand {
+ public:
+ enum LocationKind { REGISTER, STACK_SLOT };
+
+ LocationOperand(InstructionOperand::Kind operand_kind,
+ LocationOperand::LocationKind location_kind,
+ MachineRepresentation rep, int index)
+ : InstructionOperand(operand_kind) {
+ DCHECK_IMPLIES(location_kind == REGISTER, index >= 0);
+ DCHECK(IsSupportedRepresentation(rep));
+ value_ |= LocationKindField::encode(location_kind);
+ value_ |= RepresentationField::encode(rep);
+ value_ |= static_cast<int64_t>(index) << IndexField::kShift;
+ }
+
+ int index() const {
+ DCHECK(IsStackSlot() || IsDoubleStackSlot());
+ return static_cast<int64_t>(value_) >> IndexField::kShift;
+ }
+
+ Register GetRegister() const {
+ DCHECK(IsRegister());
+ return Register::from_code(static_cast<int64_t>(value_) >>
+ IndexField::kShift);
+ }
+
+ DoubleRegister GetDoubleRegister() const {
+ DCHECK(IsDoubleRegister());
+ return DoubleRegister::from_code(static_cast<int64_t>(value_) >>
+ IndexField::kShift);
+ }
+
+ LocationKind location_kind() const {
+ return LocationKindField::decode(value_);
+ }
+
+ MachineRepresentation representation() const {
+ return RepresentationField::decode(value_);
+ }
+
+ static bool IsSupportedRepresentation(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kTagged:
+ return true;
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kNone:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+ }
+
+ static LocationOperand* cast(InstructionOperand* op) {
+ DCHECK(ALLOCATED == op->kind() || EXPLICIT == op->kind());
+ return static_cast<LocationOperand*>(op);
+ }
+
+ static const LocationOperand* cast(const InstructionOperand* op) {
+ DCHECK(ALLOCATED == op->kind() || EXPLICIT == op->kind());
+ return static_cast<const LocationOperand*>(op);
+ }
+
+ static LocationOperand cast(const InstructionOperand& op) {
+ DCHECK(ALLOCATED == op.kind() || EXPLICIT == op.kind());
+ return *static_cast<const LocationOperand*>(&op);
+ }
+
+ STATIC_ASSERT(KindField::kSize == 3);
+ class LocationKindField : public BitField64<LocationKind, 3, 2> {};
+ class RepresentationField : public BitField64<MachineRepresentation, 5, 8> {};
+ class IndexField : public BitField64<int32_t, 35, 29> {};
+};
+
+
+class ExplicitOperand : public LocationOperand {
+ public:
+ ExplicitOperand(LocationKind kind, MachineRepresentation rep, int index);
+
+ static ExplicitOperand* New(Zone* zone, LocationKind kind,
+ MachineRepresentation rep, int index) {
+ return InstructionOperand::New(zone, ExplicitOperand(kind, rep, index));
+ }
+
+ INSTRUCTION_OPERAND_CASTS(ExplicitOperand, EXPLICIT);
+};
+
+
+class AllocatedOperand : public LocationOperand {
+ public:
+ AllocatedOperand(LocationKind kind, MachineRepresentation rep, int index)
+ : LocationOperand(ALLOCATED, kind, rep, index) {}
+
+ static AllocatedOperand* New(Zone* zone, LocationKind kind,
+ MachineRepresentation rep, int index) {
+ return InstructionOperand::New(zone, AllocatedOperand(kind, rep, index));
+ }
+
+ INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED);
+};
+
+
+#undef INSTRUCTION_OPERAND_CASTS
+
+
+bool InstructionOperand::IsAnyRegister() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::REGISTER;
+}
+
+
+bool InstructionOperand::IsRegister() const {
+ return IsAnyRegister() &&
+ !IsFloatingPoint(LocationOperand::cast(this)->representation());
+}
+
+bool InstructionOperand::IsDoubleRegister() const {
+ return IsAnyRegister() &&
+ IsFloatingPoint(LocationOperand::cast(this)->representation());
+}
+
+bool InstructionOperand::IsStackSlot() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::STACK_SLOT &&
+ !IsFloatingPoint(LocationOperand::cast(this)->representation());
+}
+
+bool InstructionOperand::IsDoubleStackSlot() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::STACK_SLOT &&
+ IsFloatingPoint(LocationOperand::cast(this)->representation());
+}
+
+uint64_t InstructionOperand::GetCanonicalizedValue() const {
+ if (IsAllocated() || IsExplicit()) {
+ // TODO(dcarney): put machine type last and mask.
+ MachineRepresentation canonicalized_representation =
+ IsFloatingPoint(LocationOperand::cast(this)->representation())
+ ? MachineRepresentation::kFloat64
+ : MachineRepresentation::kNone;
+ return InstructionOperand::KindField::update(
+ LocationOperand::RepresentationField::update(
+ this->value_, canonicalized_representation),
+ LocationOperand::EXPLICIT);
+ }
+ return this->value_;
+}
+
+
+// Required for maps that don't care about machine representation.
+struct CompareOperandModuloType {
+ bool operator()(const InstructionOperand& a,
+ const InstructionOperand& b) const {
+ return a.CompareCanonicalized(b);
+ }
+};
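+
+// Usage sketch (illustrative; the map is hypothetical): with this comparator,
+// allocated operands that name the same location but differ only in their
+// non-floating-point representation compare equal:
+//
+//   std::map<InstructionOperand, int, CompareOperandModuloType> counts;
+//   counts[AllocatedOperand(LocationOperand::REGISTER,
+//                           MachineRepresentation::kWord32, 3)]++;
+//   counts[AllocatedOperand(LocationOperand::REGISTER,
+//                           MachineRepresentation::kTagged, 3)]++;
+//   // counts now holds a single entry with value 2.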
+
+
+class MoveOperands final : public ZoneObject {
+ public:
+ MoveOperands(const InstructionOperand& source,
+ const InstructionOperand& destination)
+ : source_(source), destination_(destination) {
+ DCHECK(!source.IsInvalid() && !destination.IsInvalid());
+ }
+
+ const InstructionOperand& source() const { return source_; }
+ InstructionOperand& source() { return source_; }
+ void set_source(const InstructionOperand& operand) { source_ = operand; }
+
+ const InstructionOperand& destination() const { return destination_; }
+ InstructionOperand& destination() { return destination_; }
+ void set_destination(const InstructionOperand& operand) {
+ destination_ = operand;
+ }
// The gap resolver marks moves as "in-progress" by clearing the
// destination (but not the source).
- bool IsPending() const { return destination_ == NULL && source_ != NULL; }
+ bool IsPending() const {
+ return destination_.IsInvalid() && !source_.IsInvalid();
+ }
+ void SetPending() { destination_ = InstructionOperand(); }
  // True if this is a move into the given destination operand.
- bool Blocks(InstructionOperand* operand) const {
- return !IsEliminated() && source()->Equals(operand);
+ bool Blocks(const InstructionOperand& operand) const {
+ return !IsEliminated() && source().EqualsCanonicalized(operand);
}
- // A move is redundant if it's been eliminated, if its source and
- // destination are the same, or if its destination is constant.
+ // A move is redundant if it's been eliminated or if its source and
+ // destination are the same.
bool IsRedundant() const {
- return IsEliminated() || source_->Equals(destination_) ||
- (destination_ != NULL && destination_->IsConstant());
+ DCHECK_IMPLIES(!destination_.IsInvalid(), !destination_.IsConstant());
+ return IsEliminated() || source_.EqualsCanonicalized(destination_);
}
  // We clear both operands to indicate a move that's been eliminated.
- void Eliminate() { source_ = destination_ = NULL; }
+ void Eliminate() { source_ = destination_ = InstructionOperand(); }
bool IsEliminated() const {
- DCHECK(source_ != NULL || destination_ == NULL);
- return source_ == NULL;
+ DCHECK_IMPLIES(source_.IsInvalid(), destination_.IsInvalid());
+ return source_.IsInvalid();
}
+ void Print(const RegisterConfiguration* config) const;
+ void Print() const;
+
private:
- InstructionOperand* source_;
- InstructionOperand* destination_;
+ InstructionOperand source_;
+ InstructionOperand destination_;
+
+ DISALLOW_COPY_AND_ASSIGN(MoveOperands);
};
@@ -317,60 +625,29 @@
std::ostream& operator<<(std::ostream& os, const PrintableMoveOperands& mo);
-template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
-class SubKindOperand FINAL : public InstructionOperand {
+class ParallelMove final : public ZoneVector<MoveOperands*>, public ZoneObject {
public:
- static SubKindOperand* Create(int index, Zone* zone) {
- DCHECK(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new (zone) SubKindOperand(index);
+ explicit ParallelMove(Zone* zone) : ZoneVector<MoveOperands*>(zone) {
+ reserve(4);
}
- static SubKindOperand* cast(InstructionOperand* op) {
- DCHECK(op->kind() == kOperandKind);
- return reinterpret_cast<SubKindOperand*>(op);
- }
-
- static const SubKindOperand* cast(const InstructionOperand* op) {
- DCHECK(op->kind() == kOperandKind);
- return reinterpret_cast<const SubKindOperand*>(op);
- }
-
- static void SetUpCache();
- static void TearDownCache();
-
- private:
- static SubKindOperand* cache;
-
- SubKindOperand() : InstructionOperand(kOperandKind, 0) {} // For the caches.
- explicit SubKindOperand(int index)
- : InstructionOperand(kOperandKind, index) {}
-};
-
-
-#define INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
- typedef SubKindOperand<InstructionOperand::type, number> name##Operand;
-INSTRUCTION_OPERAND_LIST(INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS)
-#undef INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS
-
-
-class ParallelMove FINAL : public ZoneObject {
- public:
- explicit ParallelMove(Zone* zone) : move_operands_(4, zone) {}
-
- void AddMove(InstructionOperand* from, InstructionOperand* to, Zone* zone) {
- move_operands_.Add(MoveOperands(from, to), zone);
+ MoveOperands* AddMove(const InstructionOperand& from,
+ const InstructionOperand& to) {
+ auto zone = get_allocator().zone();
+ auto move = new (zone) MoveOperands(from, to);
+ push_back(move);
+ return move;
}
bool IsRedundant() const;
- ZoneList<MoveOperands>* move_operands() { return &move_operands_; }
- const ZoneList<MoveOperands>* move_operands() const {
- return &move_operands_;
- }
+  // Prepare this ParallelMove to insert |move| as if it happened in a
+  // subsequent ParallelMove. move->source() may be changed. The MoveOperands
+  // returned must be Eliminated.
+ MoveOperands* PrepareInsertAfter(MoveOperands* move) const;
private:
- ZoneList<MoveOperands> move_operands_;
+ DISALLOW_COPY_AND_ASSIGN(ParallelMove);
};
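+
+// A sketch of folding a move from a later gap into this ParallelMove
+// (illustrative only; |moves| and |move| are caller-provided):
+//
+//   if (MoveOperands* to_eliminate = moves->PrepareInsertAfter(move)) {
+//     to_eliminate->Eliminate();
+//   }
+//   moves->push_back(move);  // ParallelMove is a ZoneVector of MoveOperands*.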
@@ -383,19 +660,13 @@
std::ostream& operator<<(std::ostream& os, const PrintableParallelMove& pm);
-class PointerMap FINAL : public ZoneObject {
+class ReferenceMap final : public ZoneObject {
public:
- explicit PointerMap(Zone* zone)
- : pointer_operands_(8, zone),
- untagged_operands_(0, zone),
- instruction_position_(-1) {}
+ explicit ReferenceMap(Zone* zone)
+ : reference_operands_(8, zone), instruction_position_(-1) {}
- const ZoneList<InstructionOperand*>* GetNormalizedOperands() {
- for (int i = 0; i < untagged_operands_.length(); ++i) {
- RemovePointer(untagged_operands_[i]);
- }
- untagged_operands_.Clear();
- return &pointer_operands_;
+ const ZoneVector<InstructionOperand>& reference_operands() const {
+ return reference_operands_;
}
int instruction_position() const { return instruction_position_; }
@@ -404,46 +675,51 @@
instruction_position_ = pos;
}
- void RecordPointer(InstructionOperand* op, Zone* zone);
- void RemovePointer(InstructionOperand* op);
- void RecordUntagged(InstructionOperand* op, Zone* zone);
+ void RecordReference(const AllocatedOperand& op);
private:
- friend std::ostream& operator<<(std::ostream& os, const PointerMap& pm);
+ friend std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm);
- ZoneList<InstructionOperand*> pointer_operands_;
- ZoneList<InstructionOperand*> untagged_operands_;
+ ZoneVector<InstructionOperand> reference_operands_;
int instruction_position_;
};
-std::ostream& operator<<(std::ostream& os, const PointerMap& pm);
+std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm);
-// TODO(titzer): s/PointerMap/ReferenceMap/
-class Instruction : public ZoneObject {
+class Instruction final {
public:
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
- InstructionOperand* OutputAt(size_t i) const {
+ const InstructionOperand* OutputAt(size_t i) const {
DCHECK(i < OutputCount());
- return operands_[i];
+ return &operands_[i];
+ }
+ InstructionOperand* OutputAt(size_t i) {
+ DCHECK(i < OutputCount());
+ return &operands_[i];
}
bool HasOutput() const { return OutputCount() == 1; }
- InstructionOperand* Output() const { return OutputAt(0); }
+ const InstructionOperand* Output() const { return OutputAt(0); }
+ InstructionOperand* Output() { return OutputAt(0); }
size_t InputCount() const { return InputCountField::decode(bit_field_); }
- InstructionOperand* InputAt(size_t i) const {
+ const InstructionOperand* InputAt(size_t i) const {
DCHECK(i < InputCount());
- return operands_[OutputCount() + i];
+ return &operands_[OutputCount() + i];
}
- void SetInputAt(size_t i, InstructionOperand* operand) {
+ InstructionOperand* InputAt(size_t i) {
DCHECK(i < InputCount());
- operands_[OutputCount() + i] = operand;
+ return &operands_[OutputCount() + i];
}
size_t TempCount() const { return TempCountField::decode(bit_field_); }
- InstructionOperand* TempAt(size_t i) const {
+ const InstructionOperand* TempAt(size_t i) const {
DCHECK(i < TempCount());
- return operands_[OutputCount() + InputCount() + i];
+ return &operands_[OutputCount() + InputCount() + i];
+ }
+ InstructionOperand* TempAt(size_t i) {
+ DCHECK(i < TempCount());
+ return &operands_[OutputCount() + InputCount() + i];
}
InstructionCode opcode() const { return opcode_; }
@@ -456,72 +732,51 @@
return FlagsConditionField::decode(opcode());
}
- // TODO(titzer): make control and call into flags.
+  // TODO(titzer): make call into a flag.
static Instruction* New(Zone* zone, InstructionCode opcode) {
- return New(zone, opcode, 0, NULL, 0, NULL, 0, NULL);
+ return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
}
static Instruction* New(Zone* zone, InstructionCode opcode,
- size_t output_count, InstructionOperand** outputs,
- size_t input_count, InstructionOperand** inputs,
- size_t temp_count, InstructionOperand** temps) {
+ size_t output_count, InstructionOperand* outputs,
+ size_t input_count, InstructionOperand* inputs,
+ size_t temp_count, InstructionOperand* temps) {
DCHECK(opcode >= 0);
- DCHECK(output_count == 0 || outputs != NULL);
- DCHECK(input_count == 0 || inputs != NULL);
- DCHECK(temp_count == 0 || temps != NULL);
- InstructionOperand* none = NULL;
- USE(none);
- int size = static_cast<int>(RoundUp(sizeof(Instruction), kPointerSize) +
- (output_count + input_count + temp_count - 1) *
- sizeof(none));
+ DCHECK(output_count == 0 || outputs != nullptr);
+ DCHECK(input_count == 0 || inputs != nullptr);
+ DCHECK(temp_count == 0 || temps != nullptr);
+ size_t total_extra_ops = output_count + input_count + temp_count;
+ if (total_extra_ops != 0) total_extra_ops--;
+ int size = static_cast<int>(
+ RoundUp(sizeof(Instruction), sizeof(InstructionOperand)) +
+ total_extra_ops * sizeof(InstructionOperand));
return new (zone->New(size)) Instruction(
opcode, output_count, outputs, input_count, inputs, temp_count, temps);
}
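+
+  // Worked example of the size computation above (illustrative): with one
+  // output, two inputs and no temps, total_extra_ops is 3 - 1 = 2, because
+  // the trailing operands_[1] array already reserves room for the first
+  // operand; the Instruction and all of its operands then occupy a single
+  // contiguous zone allocation.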
- // TODO(titzer): another holdover from lithium days; register allocator
- // should not need to know about control instructions.
- Instruction* MarkAsControl() {
- bit_field_ = IsControlField::update(bit_field_, true);
- return this;
- }
Instruction* MarkAsCall() {
bit_field_ = IsCallField::update(bit_field_, true);
return this;
}
- bool IsControl() const { return IsControlField::decode(bit_field_); }
bool IsCall() const { return IsCallField::decode(bit_field_); }
- bool NeedsPointerMap() const { return IsCall(); }
- bool HasPointerMap() const { return pointer_map_ != NULL; }
-
- bool IsGapMoves() const {
- return opcode() == kGapInstruction || opcode() == kBlockStartInstruction;
- }
- bool IsBlockStart() const { return opcode() == kBlockStartInstruction; }
- bool IsSourcePosition() const {
- return opcode() == kSourcePositionInstruction;
- }
+ bool NeedsReferenceMap() const { return IsCall(); }
+ bool HasReferenceMap() const { return reference_map_ != nullptr; }
bool ClobbersRegisters() const { return IsCall(); }
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersDoubleRegisters() const { return IsCall(); }
- PointerMap* pointer_map() const { return pointer_map_; }
+ ReferenceMap* reference_map() const { return reference_map_; }
- void set_pointer_map(PointerMap* map) {
- DCHECK(NeedsPointerMap());
- DCHECK_EQ(NULL, pointer_map_);
- pointer_map_ = map;
+ void set_reference_map(ReferenceMap* map) {
+ DCHECK(NeedsReferenceMap());
+ DCHECK(!reference_map_);
+ reference_map_ = map;
}
- // Placement new operator so that we can smash instructions into
- // zone-allocated memory.
- void* operator new(size_t, void* location) { return location; }
-
- void operator delete(void* pointer, void* location) { UNREACHABLE(); }
-
void OverwriteWithNop() {
opcode_ = ArchOpcodeField::encode(kArchNop);
bit_field_ = 0;
- pointer_map_ = NULL;
+ reference_map_ = nullptr;
}
bool IsNop() const {
@@ -529,46 +784,56 @@
OutputCount() == 0 && TempCount() == 0;
}
- protected:
- explicit Instruction(InstructionCode opcode)
- : opcode_(opcode),
- bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
- TempCountField::encode(0) | IsCallField::encode(false) |
- IsControlField::encode(false)),
- pointer_map_(NULL) {}
+ enum GapPosition {
+ START,
+ END,
+ FIRST_GAP_POSITION = START,
+ LAST_GAP_POSITION = END
+ };
- Instruction(InstructionCode opcode, size_t output_count,
- InstructionOperand** outputs, size_t input_count,
- InstructionOperand** inputs, size_t temp_count,
- InstructionOperand** temps)
- : opcode_(opcode),
- bit_field_(OutputCountField::encode(output_count) |
- InputCountField::encode(input_count) |
- TempCountField::encode(temp_count) |
- IsCallField::encode(false) | IsControlField::encode(false)),
- pointer_map_(NULL) {
- for (size_t i = 0; i < output_count; ++i) {
- operands_[i] = outputs[i];
+ ParallelMove* GetOrCreateParallelMove(GapPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == nullptr) {
+ parallel_moves_[pos] = new (zone) ParallelMove(zone);
}
- for (size_t i = 0; i < input_count; ++i) {
- operands_[output_count + i] = inputs[i];
- }
- for (size_t i = 0; i < temp_count; ++i) {
- operands_[output_count + input_count + i] = temps[i];
- }
+ return parallel_moves_[pos];
}
- protected:
+ ParallelMove* GetParallelMove(GapPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ const ParallelMove* GetParallelMove(GapPosition pos) const {
+ return parallel_moves_[pos];
+ }
+
+ bool AreMovesRedundant() const;
+
+  ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
+  ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
+
+ void Print(const RegisterConfiguration* config) const;
+ void Print() const;
+
+ private:
+ explicit Instruction(InstructionCode opcode);
+
+ Instruction(InstructionCode opcode, size_t output_count,
+ InstructionOperand* outputs, size_t input_count,
+ InstructionOperand* inputs, size_t temp_count,
+ InstructionOperand* temps);
+
typedef BitField<size_t, 0, 8> OutputCountField;
typedef BitField<size_t, 8, 16> InputCountField;
typedef BitField<size_t, 24, 6> TempCountField;
typedef BitField<bool, 30, 1> IsCallField;
- typedef BitField<bool, 31, 1> IsControlField;
InstructionCode opcode_;
uint32_t bit_field_;
- PointerMap* pointer_map_;
- InstructionOperand* operands_[1];
+ ParallelMove* parallel_moves_[2];
+ ReferenceMap* reference_map_;
+ InstructionOperand operands_[1];
+
+ DISALLOW_COPY_AND_ASSIGN(Instruction);
};
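+
+// A sketch of inserting a gap move before an instruction (illustrative
+// only; |from| and |to| are caller-chosen InstructionOperands):
+//
+//   instr->GetOrCreateParallelMove(Instruction::START, zone)
+//       ->AddMove(from, to);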
@@ -579,125 +844,44 @@
std::ostream& operator<<(std::ostream& os, const PrintableInstruction& instr);
-// Represents moves inserted before an instruction due to register allocation.
-// TODO(titzer): squash GapInstruction back into Instruction, since essentially
-// every instruction can possibly have moves inserted before it.
-class GapInstruction : public Instruction {
+class RpoNumber final {
public:
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
+ static const int kInvalidRpoNumber = -1;
+ int ToInt() const {
+ DCHECK(IsValid());
+ return index_;
+ }
+ size_t ToSize() const {
+ DCHECK(IsValid());
+ return static_cast<size_t>(index_);
+ }
+ bool IsValid() const { return index_ >= 0; }
+ static RpoNumber FromInt(int index) { return RpoNumber(index); }
+ static RpoNumber Invalid() { return RpoNumber(kInvalidRpoNumber); }
- ParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new (zone) ParallelMove(zone);
- }
- return parallel_moves_[pos];
+ bool IsNext(const RpoNumber other) const {
+ DCHECK(IsValid());
+ return other.index_ == this->index_ + 1;
}
- ParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- const ParallelMove* GetParallelMove(InnerPosition pos) const {
- return parallel_moves_[pos];
- }
-
- bool IsRedundant() const;
-
- ParallelMove** parallel_moves() { return parallel_moves_; }
-
- static GapInstruction* New(Zone* zone) {
- void* buffer = zone->New(sizeof(GapInstruction));
- return new (buffer) GapInstruction(kGapInstruction);
- }
-
- static GapInstruction* cast(Instruction* instr) {
- DCHECK(instr->IsGapMoves());
- return static_cast<GapInstruction*>(instr);
- }
-
- static const GapInstruction* cast(const Instruction* instr) {
- DCHECK(instr->IsGapMoves());
- return static_cast<const GapInstruction*>(instr);
- }
-
- protected:
- explicit GapInstruction(InstructionCode opcode) : Instruction(opcode) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
+ // Comparison operators.
+ bool operator==(RpoNumber other) const { return index_ == other.index_; }
+ bool operator!=(RpoNumber other) const { return index_ != other.index_; }
+ bool operator>(RpoNumber other) const { return index_ > other.index_; }
+ bool operator<(RpoNumber other) const { return index_ < other.index_; }
+ bool operator<=(RpoNumber other) const { return index_ <= other.index_; }
+ bool operator>=(RpoNumber other) const { return index_ >= other.index_; }
private:
- friend std::ostream& operator<<(std::ostream& os,
- const PrintableInstruction& instr);
- ParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+ explicit RpoNumber(int32_t index) : index_(index) {}
+ int32_t index_;
};
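+
+// Example (illustrative): RPO numbers are dense block indices, so
+// RpoNumber::FromInt(3).IsNext(RpoNumber::FromInt(4)) holds, while
+// RpoNumber::Invalid().IsValid() does not.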
-// This special kind of gap move instruction represents the beginning of a
-// block of code.
-class BlockStartInstruction FINAL : public GapInstruction {
- public:
- static BlockStartInstruction* New(Zone* zone) {
- void* buffer = zone->New(sizeof(BlockStartInstruction));
- return new (buffer) BlockStartInstruction();
- }
-
- static BlockStartInstruction* cast(Instruction* instr) {
- DCHECK(instr->IsBlockStart());
- return static_cast<BlockStartInstruction*>(instr);
- }
-
- static const BlockStartInstruction* cast(const Instruction* instr) {
- DCHECK(instr->IsBlockStart());
- return static_cast<const BlockStartInstruction*>(instr);
- }
-
- private:
- BlockStartInstruction() : GapInstruction(kBlockStartInstruction) {}
-};
+std::ostream& operator<<(std::ostream&, const RpoNumber&);
-class SourcePositionInstruction FINAL : public Instruction {
- public:
- static SourcePositionInstruction* New(Zone* zone, SourcePosition position) {
- void* buffer = zone->New(sizeof(SourcePositionInstruction));
- return new (buffer) SourcePositionInstruction(position);
- }
-
- SourcePosition source_position() const { return source_position_; }
-
- static SourcePositionInstruction* cast(Instruction* instr) {
- DCHECK(instr->IsSourcePosition());
- return static_cast<SourcePositionInstruction*>(instr);
- }
-
- static const SourcePositionInstruction* cast(const Instruction* instr) {
- DCHECK(instr->IsSourcePosition());
- return static_cast<const SourcePositionInstruction*>(instr);
- }
-
- private:
- explicit SourcePositionInstruction(SourcePosition source_position)
- : Instruction(kSourcePositionInstruction),
- source_position_(source_position) {
- DCHECK(!source_position_.IsInvalid());
- DCHECK(!source_position_.IsUnknown());
- }
-
- SourcePosition source_position_;
-};
-
-
-class Constant FINAL {
+class Constant final {
public:
enum Type {
kInt32,
@@ -709,7 +893,7 @@
kRpoNumber
};
- explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
+ explicit Constant(int32_t v);
explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
explicit Constant(float v) : type_(kFloat32), value_(bit_cast<int32_t>(v)) {}
explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
@@ -717,8 +901,7 @@
: type_(kExternalReference), value_(bit_cast<intptr_t>(ref)) {}
explicit Constant(Handle<HeapObject> obj)
: type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
- explicit Constant(BasicBlock::RpoNumber rpo)
- : type_(kRpoNumber), value_(rpo.ToInt()) {}
+ explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
Type type() const { return type_; }
@@ -751,9 +934,9 @@
return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
}
- BasicBlock::RpoNumber ToRpoNumber() const {
+ RpoNumber ToRpoNumber() const {
DCHECK_EQ(kRpoNumber, type());
- return BasicBlock::RpoNumber::FromInt(static_cast<int>(value_));
+ return RpoNumber::FromInt(static_cast<int>(value_));
}
Handle<HeapObject> ToHeapObject() const {
@@ -767,12 +950,67 @@
};
+std::ostream& operator<<(std::ostream& os, const Constant& constant);
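+
+// Example of the tagged-union behavior (illustrative):
+//
+//   Constant c(7);                           // int selects the kInt32 arm.
+//   DCHECK_EQ(Constant::kInt32, c.type());
+//   DCHECK_EQ(7, c.ToInt32());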
+
+
+// Forward declarations.
+class FrameStateDescriptor;
+
+
+enum class StateValueKind { kPlain, kNested, kDuplicate };
+
+
+class StateValueDescriptor {
+ public:
+ explicit StateValueDescriptor(Zone* zone)
+ : kind_(StateValueKind::kPlain),
+ type_(MachineType::AnyTagged()),
+ id_(0),
+ fields_(zone) {}
+
+ static StateValueDescriptor Plain(Zone* zone, MachineType type) {
+ return StateValueDescriptor(StateValueKind::kPlain, zone, type, 0);
+ }
+ static StateValueDescriptor Recursive(Zone* zone, size_t id) {
+ return StateValueDescriptor(StateValueKind::kNested, zone,
+ MachineType::AnyTagged(), id);
+ }
+ static StateValueDescriptor Duplicate(Zone* zone, size_t id) {
+ return StateValueDescriptor(StateValueKind::kDuplicate, zone,
+ MachineType::AnyTagged(), id);
+ }
+
+ size_t size() { return fields_.size(); }
+ ZoneVector<StateValueDescriptor>& fields() { return fields_; }
+  bool IsPlain() { return kind_ == StateValueKind::kPlain; }
+  bool IsNested() { return kind_ == StateValueKind::kNested; }
+  bool IsDuplicate() { return kind_ == StateValueKind::kDuplicate; }
+ MachineType type() const { return type_; }
+ MachineType GetOperandType(size_t index) const {
+ return fields_[index].type_;
+ }
+ size_t id() const { return id_; }
+
+ private:
+ StateValueDescriptor(StateValueKind kind, Zone* zone, MachineType type,
+ size_t id)
+ : kind_(kind), type_(type), id_(id), fields_(zone) {}
+
+ StateValueKind kind_;
+ MachineType type_;
+ size_t id_;
+ ZoneVector<StateValueDescriptor> fields_;
+};
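+
+// A sketch of building a frame-state layout (illustrative only): the root
+// descriptor owns the field list, and kDuplicate/kNested entries refer to
+// other values by id rather than repeating them:
+//
+//   StateValueDescriptor values(zone);  // root, kPlain, with fields.
+//   values.fields().push_back(
+//       StateValueDescriptor::Plain(zone, MachineType::AnyTagged()));
+//   values.fields().push_back(StateValueDescriptor::Duplicate(zone, 0));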
+
+
class FrameStateDescriptor : public ZoneObject {
public:
- FrameStateDescriptor(Zone* zone, const FrameStateCallInfo& state_info,
+ FrameStateDescriptor(Zone* zone, FrameStateType type, BailoutId bailout_id,
+ OutputFrameStateCombine state_combine,
size_t parameters_count, size_t locals_count,
size_t stack_count,
- FrameStateDescriptor* outer_state = NULL);
+ MaybeHandle<SharedFunctionInfo> shared_info,
+ FrameStateDescriptor* outer_state = nullptr);
FrameStateType type() const { return type_; }
BailoutId bailout_id() const { return bailout_id_; }
@@ -780,9 +1018,11 @@
size_t parameters_count() const { return parameters_count_; }
size_t locals_count() const { return locals_count_; }
size_t stack_count() const { return stack_count_; }
+ MaybeHandle<SharedFunctionInfo> shared_info() const { return shared_info_; }
FrameStateDescriptor* outer_state() const { return outer_state_; }
- MaybeHandle<JSFunction> jsfunction() const { return jsfunction_; }
- bool HasContext() const { return type_ == JS_FRAME; }
+ bool HasContext() const {
+ return FrameStateFunctionInfo::IsJSFunctionType(type_);
+ }
size_t GetSize(OutputFrameStateCombine combine =
OutputFrameStateCombine::Ignore()) const;
@@ -790,8 +1030,10 @@
size_t GetFrameCount() const;
size_t GetJSFrameCount() const;
- MachineType GetType(size_t index) const;
- void SetType(size_t index, MachineType type);
+ MachineType GetType(size_t index) const {
+ return values_.GetOperandType(index);
+ }
+ StateValueDescriptor* GetStateValueDescriptor() { return &values_; }
private:
FrameStateType type_;
@@ -800,63 +1042,43 @@
size_t parameters_count_;
size_t locals_count_;
size_t stack_count_;
- ZoneVector<MachineType> types_;
+ StateValueDescriptor values_;
+ MaybeHandle<SharedFunctionInfo> const shared_info_;
FrameStateDescriptor* outer_state_;
- MaybeHandle<JSFunction> jsfunction_;
};
-std::ostream& operator<<(std::ostream& os, const Constant& constant);
+
+typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
-class PhiInstruction FINAL : public ZoneObject {
+class PhiInstruction final : public ZoneObject {
public:
- typedef ZoneVector<InstructionOperand*> Inputs;
+ typedef ZoneVector<InstructionOperand> Inputs;
- PhiInstruction(Zone* zone, int virtual_register, size_t reserved_input_count)
- : virtual_register_(virtual_register),
- operands_(zone),
- output_(nullptr),
- inputs_(zone) {
- UnallocatedOperand* output =
- new (zone) UnallocatedOperand(UnallocatedOperand::NONE);
- output->set_virtual_register(virtual_register);
- output_ = output;
- inputs_.reserve(reserved_input_count);
- operands_.reserve(reserved_input_count);
- }
+ PhiInstruction(Zone* zone, int virtual_register, size_t input_count);
+
+ void SetInput(size_t offset, int virtual_register);
int virtual_register() const { return virtual_register_; }
const IntVector& operands() const { return operands_; }
- void Extend(Zone* zone, int virtual_register) {
- UnallocatedOperand* input =
- new (zone) UnallocatedOperand(UnallocatedOperand::ANY);
- input->set_virtual_register(virtual_register);
- operands_.push_back(virtual_register);
- inputs_.push_back(input);
- }
-
- InstructionOperand* output() const { return output_; }
- const Inputs& inputs() const { return inputs_; }
- Inputs& inputs() { return inputs_; }
+ // TODO(dcarney): this has no real business being here, since it's internal to
+ // the register allocator, but putting it here was convenient.
+ const InstructionOperand& output() const { return output_; }
+ InstructionOperand& output() { return output_; }
private:
- // TODO(dcarney): some of these fields are only for verification, move them to
- // verifier.
const int virtual_register_;
+ InstructionOperand output_;
IntVector operands_;
- InstructionOperand* output_;
- Inputs inputs_;
};
// Analogue of BasicBlock for Instructions instead of Nodes.
-class InstructionBlock FINAL : public ZoneObject {
+class InstructionBlock final : public ZoneObject {
public:
- InstructionBlock(Zone* zone, BasicBlock::Id id,
- BasicBlock::RpoNumber rpo_number,
- BasicBlock::RpoNumber loop_header,
- BasicBlock::RpoNumber loop_end, bool deferred);
+ InstructionBlock(Zone* zone, RpoNumber rpo_number, RpoNumber loop_header,
+ RpoNumber loop_end, bool deferred, bool handler);
// Instruction indexes (used by the register allocator).
int first_instruction_index() const {
@@ -879,24 +1101,24 @@
void set_code_end(int32_t end) { code_end_ = end; }
bool IsDeferred() const { return deferred_; }
+ bool IsHandler() const { return handler_; }
- BasicBlock::Id id() const { return id_; }
- BasicBlock::RpoNumber ao_number() const { return ao_number_; }
- BasicBlock::RpoNumber rpo_number() const { return rpo_number_; }
- BasicBlock::RpoNumber loop_header() const { return loop_header_; }
- BasicBlock::RpoNumber loop_end() const {
+ RpoNumber ao_number() const { return ao_number_; }
+ RpoNumber rpo_number() const { return rpo_number_; }
+ RpoNumber loop_header() const { return loop_header_; }
+ RpoNumber loop_end() const {
DCHECK(IsLoopHeader());
return loop_end_;
}
inline bool IsLoopHeader() const { return loop_end_.IsValid(); }
- typedef ZoneVector<BasicBlock::RpoNumber> Predecessors;
+ typedef ZoneVector<RpoNumber> Predecessors;
Predecessors& predecessors() { return predecessors_; }
const Predecessors& predecessors() const { return predecessors_; }
size_t PredecessorCount() const { return predecessors_.size(); }
- size_t PredecessorIndexOf(BasicBlock::RpoNumber rpo_number) const;
+ size_t PredecessorIndexOf(RpoNumber rpo_number) const;
- typedef ZoneVector<BasicBlock::RpoNumber> Successors;
+ typedef ZoneVector<RpoNumber> Successors;
Successors& successors() { return successors_; }
const Successors& successors() const { return successors_; }
size_t SuccessorCount() const { return successors_.size(); }
@@ -905,49 +1127,64 @@
const PhiInstructions& phis() const { return phis_; }
void AddPhi(PhiInstruction* phi) { phis_.push_back(phi); }
- void set_ao_number(BasicBlock::RpoNumber ao_number) {
- ao_number_ = ao_number;
- }
+ void set_ao_number(RpoNumber ao_number) { ao_number_ = ao_number; }
+
+ bool needs_frame() const { return needs_frame_; }
+ void mark_needs_frame() { needs_frame_ = true; }
+
+ bool must_construct_frame() const { return must_construct_frame_; }
+ void mark_must_construct_frame() { must_construct_frame_ = true; }
+
+ bool must_deconstruct_frame() const { return must_deconstruct_frame_; }
+ void mark_must_deconstruct_frame() { must_deconstruct_frame_ = true; }
+
+ void set_last_deferred(RpoNumber last) { last_deferred_ = last; }
+ RpoNumber last_deferred() const { return last_deferred_; }
private:
Successors successors_;
Predecessors predecessors_;
PhiInstructions phis_;
- const BasicBlock::Id id_;
- BasicBlock::RpoNumber ao_number_; // Assembly order number.
- const BasicBlock::RpoNumber rpo_number_;
- const BasicBlock::RpoNumber loop_header_;
- const BasicBlock::RpoNumber loop_end_;
+ RpoNumber ao_number_; // Assembly order number.
+ const RpoNumber rpo_number_;
+ const RpoNumber loop_header_;
+ const RpoNumber loop_end_;
int32_t code_start_; // start index of arch-specific code.
int32_t code_end_; // end index of arch-specific code.
const bool deferred_; // Block contains deferred code.
+ const bool handler_; // Block is a handler entry point.
+ bool needs_frame_;
+ bool must_construct_frame_;
+ bool must_deconstruct_frame_;
+ RpoNumber last_deferred_;
};
typedef ZoneDeque<Constant> ConstantDeque;
typedef std::map<int, Constant, std::less<int>,
- zone_allocator<std::pair<int, Constant> > > ConstantMap;
+ zone_allocator<std::pair<const int, Constant> > > ConstantMap;
typedef ZoneDeque<Instruction*> InstructionDeque;
-typedef ZoneDeque<PointerMap*> PointerMapDeque;
-typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
+typedef ZoneDeque<ReferenceMap*> ReferenceMapDeque;
typedef ZoneVector<InstructionBlock*> InstructionBlocks;
+
+// Forward declarations.
struct PrintableInstructionSequence;
// Represents architecture-specific generated code before, during, and after
// register allocation.
-// TODO(titzer): s/IsDouble/IsFloat64/
-class InstructionSequence FINAL : public ZoneObject {
+class InstructionSequence final : public ZoneObject {
public:
static InstructionBlocks* InstructionBlocksFor(Zone* zone,
const Schedule* schedule);
// Puts the deferred blocks last.
static void ComputeAssemblyOrder(InstructionBlocks* blocks);
- InstructionSequence(Zone* zone, InstructionBlocks* instruction_blocks);
+ InstructionSequence(Isolate* isolate, Zone* zone,
+ InstructionBlocks* instruction_blocks);
- int NextVirtualRegister() { return next_virtual_register_++; }
+ int NextVirtualRegister();
int VirtualRegisterCount() const { return next_virtual_register_; }
const InstructionBlocks& instruction_blocks() const {
@@ -958,7 +1195,7 @@
return static_cast<int>(instruction_blocks_->size());
}
- InstructionBlock* InstructionBlockAt(BasicBlock::RpoNumber rpo_number) {
+ InstructionBlock* InstructionBlockAt(RpoNumber rpo_number) {
return instruction_blocks_->at(rpo_number.ToSize());
}
@@ -967,46 +1204,50 @@
->last_instruction_index();
}
- const InstructionBlock* InstructionBlockAt(
- BasicBlock::RpoNumber rpo_number) const {
+ const InstructionBlock* InstructionBlockAt(RpoNumber rpo_number) const {
return instruction_blocks_->at(rpo_number.ToSize());
}
- const InstructionBlock* GetInstructionBlock(int instruction_index) const;
+ InstructionBlock* GetInstructionBlock(int instruction_index) const;
- bool IsReference(int virtual_register) const;
- bool IsDouble(int virtual_register) const;
+ static MachineRepresentation DefaultRepresentation() {
+ return MachineType::PointerRepresentation();
+ }
+ MachineRepresentation GetRepresentation(int virtual_register) const;
+ void MarkAsRepresentation(MachineRepresentation rep, int virtual_register);
- void MarkAsReference(int virtual_register);
- void MarkAsDouble(int virtual_register);
+ bool IsReference(int virtual_register) const {
+ return GetRepresentation(virtual_register) ==
+ MachineRepresentation::kTagged;
+ }
+ bool IsFloat(int virtual_register) const {
+ return IsFloatingPoint(GetRepresentation(virtual_register));
+ }
- void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
-
- BlockStartInstruction* GetBlockStart(BasicBlock::RpoNumber rpo) const;
+ Instruction* GetBlockStart(RpoNumber rpo) const;
typedef InstructionDeque::const_iterator const_iterator;
const_iterator begin() const { return instructions_.begin(); }
const_iterator end() const { return instructions_.end(); }
const InstructionDeque& instructions() const { return instructions_; }
-
- GapInstruction* GapAt(int index) const {
- return GapInstruction::cast(InstructionAt(index));
+ int LastInstructionIndex() const {
+ return static_cast<int>(instructions().size()) - 1;
}
- bool IsGapAt(int index) const { return InstructionAt(index)->IsGapMoves(); }
+
Instruction* InstructionAt(int index) const {
DCHECK(index >= 0);
DCHECK(index < static_cast<int>(instructions_.size()));
return instructions_[index];
}
- Isolate* isolate() const { return zone()->isolate(); }
- const PointerMapDeque* pointer_maps() const { return &pointer_maps_; }
+ Isolate* isolate() const { return isolate_; }
+ const ReferenceMapDeque* reference_maps() const { return &reference_maps_; }
Zone* zone() const { return zone_; }
// Used by the instruction selector while adding instructions.
int AddInstruction(Instruction* instr);
- void StartBlock(BasicBlock::RpoNumber rpo);
- void EndBlock(BasicBlock::RpoNumber rpo);
+ void StartBlock(RpoNumber rpo);
+ void EndBlock(RpoNumber rpo);
int AddConstant(int virtual_register, Constant constant) {
// TODO(titzer): allow RPO numbers as constants?
@@ -1026,15 +1267,28 @@
typedef ZoneVector<Constant> Immediates;
Immediates& immediates() { return immediates_; }
- int AddImmediate(Constant constant) {
+ ImmediateOperand AddImmediate(const Constant& constant) {
+ if (constant.type() == Constant::kInt32) {
+ return ImmediateOperand(ImmediateOperand::INLINE, constant.ToInt32());
+ }
int index = static_cast<int>(immediates_.size());
immediates_.push_back(constant);
- return index;
+ return ImmediateOperand(ImmediateOperand::INDEXED, index);
}
- Constant GetImmediate(int index) const {
- DCHECK(index >= 0);
- DCHECK(index < static_cast<int>(immediates_.size()));
- return immediates_[index];
+
+ Constant GetImmediate(const ImmediateOperand* op) const {
+ switch (op->type()) {
+ case ImmediateOperand::INLINE:
+ return Constant(op->inline_value());
+ case ImmediateOperand::INDEXED: {
+ int index = op->indexed_value();
+ DCHECK(index >= 0);
+ DCHECK(index < static_cast<int>(immediates_.size()));
+ return immediates_[index];
+ }
+ }
+ UNREACHABLE();
+ return Constant(static_cast<int32_t>(0));
}
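+
+  // Example (illustrative): small integers are encoded inline in the operand
+  // itself, everything else goes through the immediates table:
+  //
+  //   AddImmediate(Constant(1));           // INLINE, inline_value() == 1
+  //   AddImmediate(Constant(int64_t{1}));  // INDEXED into immediates_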
class StateId {
@@ -1050,30 +1304,42 @@
StateId AddFrameStateDescriptor(FrameStateDescriptor* descriptor);
FrameStateDescriptor* GetFrameStateDescriptor(StateId deoptimization_id);
int GetFrameStateDescriptorCount();
-
- BasicBlock::RpoNumber InputRpo(Instruction* instr, size_t index) {
- InstructionOperand* operand = instr->InputAt(index);
- Constant constant = operand->IsImmediate() ? GetImmediate(operand->index())
- : GetConstant(operand->index());
- return constant.ToRpoNumber();
+ DeoptimizationVector const& frame_state_descriptors() const {
+ return deoptimization_entries_;
}
+ RpoNumber InputRpo(Instruction* instr, size_t index);
+
+ bool GetSourcePosition(const Instruction* instr,
+ SourcePosition* result) const;
+ void SetSourcePosition(const Instruction* instr, SourcePosition value);
+
+ bool ContainsCall() const {
+ for (Instruction* instr : instructions_) {
+ if (instr->IsCall()) return true;
+ }
+ return false;
+ }
+ void Print(const RegisterConfiguration* config) const;
+ void Print() const;
+
private:
friend std::ostream& operator<<(std::ostream& os,
const PrintableInstructionSequence& code);
- typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
+ typedef ZoneMap<const Instruction*, SourcePosition> SourcePositionMap;
+ Isolate* isolate_;
Zone* const zone_;
InstructionBlocks* const instruction_blocks_;
+ SourcePositionMap source_positions_;
IntVector block_starts_;
ConstantMap constants_;
Immediates immediates_;
InstructionDeque instructions_;
int next_virtual_register_;
- PointerMapDeque pointer_maps_;
- VirtualRegisterSet doubles_;
- VirtualRegisterSet references_;
+ ReferenceMapDeque reference_maps_;
+ ZoneVector<MachineRepresentation> representations_;
DeoptimizationVector deoptimization_entries_;
DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
diff --git a/src/compiler/interpreter-assembler.cc b/src/compiler/interpreter-assembler.cc
new file mode 100644
index 0000000..7080d02
--- /dev/null
+++ b/src/compiler/interpreter-assembler.cc
@@ -0,0 +1,751 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/interpreter-assembler.h"
+
+#include <ostream>
+
+#include "src/code-factory.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/schedule.h"
+#include "src/frames.h"
+#include "src/interface-descriptors.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/machine-type.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
+ interpreter::Bytecode bytecode)
+ : bytecode_(bytecode),
+ raw_assembler_(new RawMachineAssembler(
+ isolate, new (zone) Graph(zone),
+ Linkage::GetInterpreterDispatchDescriptor(zone),
+ MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags())),
+ accumulator_(
+ raw_assembler_->Parameter(Linkage::kInterpreterAccumulatorParameter)),
+ bytecode_offset_(raw_assembler_->Parameter(
+ Linkage::kInterpreterBytecodeOffsetParameter)),
+ context_(
+ raw_assembler_->Parameter(Linkage::kInterpreterContextParameter)),
+ code_generated_(false) {}
+
+
+InterpreterAssembler::~InterpreterAssembler() {}
+
+
+Handle<Code> InterpreterAssembler::GenerateCode() {
+ DCHECK(!code_generated_);
+
+ // Disallow empty handlers that never return.
+ DCHECK_NE(0, graph()->end()->InputCount());
+
+ const char* bytecode_name = interpreter::Bytecodes::ToString(bytecode_);
+ Schedule* schedule = raw_assembler_->Export();
+ Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
+ isolate(), raw_assembler_->call_descriptor(), graph(), schedule,
+ Code::STUB, bytecode_name);
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_trace_ignition_codegen) {
+ OFStream os(stdout);
+ code->Disassemble(bytecode_name, os);
+ os << std::flush;
+ }
+#endif
+
+ code_generated_ = true;
+ return code;
+}
+
+
+Node* InterpreterAssembler::GetAccumulator() { return accumulator_; }
+
+
+void InterpreterAssembler::SetAccumulator(Node* value) { accumulator_ = value; }
+
+
+Node* InterpreterAssembler::GetContext() { return context_; }
+
+
+void InterpreterAssembler::SetContext(Node* value) { context_ = value; }
+
+
+Node* InterpreterAssembler::BytecodeOffset() { return bytecode_offset_; }
+
+
+Node* InterpreterAssembler::RegisterFileRawPointer() {
+ return raw_assembler_->Parameter(Linkage::kInterpreterRegisterFileParameter);
+}
+
+
+Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
+ return raw_assembler_->Parameter(Linkage::kInterpreterBytecodeArrayParameter);
+}
+
+
+Node* InterpreterAssembler::DispatchTableRawPointer() {
+ return raw_assembler_->Parameter(Linkage::kInterpreterDispatchTableParameter);
+}
+
+
+Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
+ return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
+}
+
+
+Node* InterpreterAssembler::LoadRegister(int offset) {
+ return raw_assembler_->Load(MachineType::AnyTagged(),
+ RegisterFileRawPointer(), Int32Constant(offset));
+}
+
+
+Node* InterpreterAssembler::LoadRegister(interpreter::Register reg) {
+ return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
+}
+
+
+Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
+ return WordShl(index, kPointerSizeLog2);
+}
+
+
+Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
+ return raw_assembler_->Load(MachineType::AnyTagged(),
+ RegisterFileRawPointer(),
+ RegisterFrameOffset(reg_index));
+}
+
+
+Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
+ return raw_assembler_->Store(MachineRepresentation::kTagged,
+ RegisterFileRawPointer(), Int32Constant(offset),
+ value, kNoWriteBarrier);
+}
+
+
+Node* InterpreterAssembler::StoreRegister(Node* value,
+ interpreter::Register reg) {
+ return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
+}
+
+
+Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
+ return raw_assembler_->Store(
+ MachineRepresentation::kTagged, RegisterFileRawPointer(),
+ RegisterFrameOffset(reg_index), value, kNoWriteBarrier);
+}
+
+
+Node* InterpreterAssembler::NextRegister(Node* reg_index) {
+  // Register indexes are negative, so the next register's index is one less.
+ return IntPtrAdd(reg_index, Int32Constant(-1));
+}
+
+
+Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
+ DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(interpreter::OperandSize::kByte,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ return raw_assembler_->Load(
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(),
+ Int32Constant(interpreter::Bytecodes::GetOperandOffset(
+ bytecode_, operand_index))));
+}
+
+
+Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
+ DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(interpreter::OperandSize::kByte,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ Node* load = raw_assembler_->Load(
+ MachineType::Int8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(),
+ Int32Constant(interpreter::Bytecodes::GetOperandOffset(
+ bytecode_, operand_index))));
+  // Ensure that we sign-extend to full pointer size.
+ if (kPointerSize == 8) {
+ load = raw_assembler_->ChangeInt32ToInt64(load);
+ }
+ return load;
+}
+
+
+Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
+ DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(interpreter::OperandSize::kShort,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ if (TargetSupportsUnalignedAccess()) {
+ return raw_assembler_->Load(
+ MachineType::Uint16(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(),
+ Int32Constant(interpreter::Bytecodes::GetOperandOffset(
+ bytecode_, operand_index))));
+ } else {
+ int offset =
+ interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
+ Node* first_byte = raw_assembler_->Load(
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
+ Node* second_byte = raw_assembler_->Load(
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
+#if V8_TARGET_LITTLE_ENDIAN
+ return raw_assembler_->WordOr(WordShl(second_byte, kBitsPerByte),
+ first_byte);
+#elif V8_TARGET_BIG_ENDIAN
+ return raw_assembler_->WordOr(WordShl(first_byte, kBitsPerByte),
+ second_byte);
+#else
+#error "Unknown Architecture"
+#endif
+ }
+}
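+
+// Worked example (illustrative): for operand bytes {0x34, 0x12}, the
+// little-endian path combines them as (0x12 << 8) | 0x34 == 0x1234, the
+// same value the unaligned Uint16 load would have produced.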
+
+
+Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
+ int operand_index) {
+ DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(interpreter::OperandSize::kShort,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ int operand_offset =
+ interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
+ Node* load;
+ if (TargetSupportsUnalignedAccess()) {
+ load = raw_assembler_->Load(
+ MachineType::Int16(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
+ } else {
+#if V8_TARGET_LITTLE_ENDIAN
+ Node* hi_byte_offset = Int32Constant(operand_offset + 1);
+ Node* lo_byte_offset = Int32Constant(operand_offset);
+#elif V8_TARGET_BIG_ENDIAN
+ Node* hi_byte_offset = Int32Constant(operand_offset);
+ Node* lo_byte_offset = Int32Constant(operand_offset + 1);
+#else
+#error "Unknown Architecture"
+#endif
+ Node* hi_byte =
+ raw_assembler_->Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), hi_byte_offset));
+ Node* lo_byte =
+ raw_assembler_->Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), lo_byte_offset));
+ hi_byte = raw_assembler_->Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
+ load = raw_assembler_->Word32Or(hi_byte, lo_byte);
+ }
+
+  // Ensure that we sign-extend to full pointer size.
+ if (kPointerSize == 8) {
+ load = raw_assembler_->ChangeInt32ToInt64(load);
+ }
+ return load;
+}
+
+
+Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
+ switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+ case interpreter::OperandSize::kByte:
+ DCHECK_EQ(
+ interpreter::OperandType::kCount8,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperand(operand_index);
+ case interpreter::OperandSize::kShort:
+ DCHECK_EQ(
+ interpreter::OperandType::kCount16,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperandShort(operand_index);
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
+ DCHECK_EQ(interpreter::OperandType::kImm8,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperandSignExtended(operand_index);
+}
+
+
+Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
+ switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+ case interpreter::OperandSize::kByte:
+ DCHECK_EQ(
+ interpreter::OperandType::kIdx8,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperand(operand_index);
+ case interpreter::OperandSize::kShort:
+ DCHECK_EQ(
+ interpreter::OperandType::kIdx16,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperandShort(operand_index);
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
+ switch (interpreter::Bytecodes::GetOperandType(bytecode_, operand_index)) {
+ case interpreter::OperandType::kReg8:
+ case interpreter::OperandType::kRegPair8:
+ case interpreter::OperandType::kMaybeReg8:
+ DCHECK_EQ(
+ interpreter::OperandSize::kByte,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ return BytecodeOperandSignExtended(operand_index);
+ case interpreter::OperandType::kReg16:
+ DCHECK_EQ(
+ interpreter::OperandSize::kShort,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ return BytecodeOperandShortSignExtended(operand_index);
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+Node* InterpreterAssembler::Int32Constant(int value) {
+ return raw_assembler_->Int32Constant(value);
+}
+
+
+Node* InterpreterAssembler::IntPtrConstant(intptr_t value) {
+ return raw_assembler_->IntPtrConstant(value);
+}
+
+
+Node* InterpreterAssembler::NumberConstant(double value) {
+ return raw_assembler_->NumberConstant(value);
+}
+
+
+Node* InterpreterAssembler::HeapConstant(Handle<HeapObject> object) {
+ return raw_assembler_->HeapConstant(object);
+}
+
+
+Node* InterpreterAssembler::BooleanConstant(bool value) {
+ return raw_assembler_->BooleanConstant(value);
+}
+
+
+Node* InterpreterAssembler::SmiShiftBitsConstant() {
+ return Int32Constant(kSmiShiftSize + kSmiTagSize);
+}
+
+
+Node* InterpreterAssembler::SmiTag(Node* value) {
+ return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
+}
+
+
+Node* InterpreterAssembler::SmiUntag(Node* value) {
+ return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
+}
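+
+// Example of the Smi encoding (illustrative; the shift width is platform
+// dependent): on 64-bit targets kSmiShiftSize + kSmiTagSize == 32, so
+// SmiTag turns 5 into 5 << 32 and SmiUntag arithmetically shifts it back.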
+
+
+Node* InterpreterAssembler::IntPtrAdd(Node* a, Node* b) {
+ return raw_assembler_->IntPtrAdd(a, b);
+}
+
+
+Node* InterpreterAssembler::IntPtrSub(Node* a, Node* b) {
+ return raw_assembler_->IntPtrSub(a, b);
+}
+
+
+Node* InterpreterAssembler::WordShl(Node* value, int shift) {
+ return raw_assembler_->WordShl(value, Int32Constant(shift));
+}
+
+
+Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
+ Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
+ BytecodeArray::kConstantPoolOffset);
+ Node* entry_offset =
+ IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ WordShl(index, kPointerSizeLog2));
+ return raw_assembler_->Load(MachineType::AnyTagged(), constant_pool,
+ entry_offset);
+}
+
+
+Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
+ int index) {
+ Node* entry_offset =
+ IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ WordShl(Int32Constant(index), kPointerSizeLog2));
+ return raw_assembler_->Load(MachineType::AnyTagged(), fixed_array,
+ entry_offset);
+}
+
+
+Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
+ return raw_assembler_->Load(MachineType::AnyTagged(), object,
+ IntPtrConstant(offset - kHeapObjectTag));
+}
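+
+// Example (illustrative): tagged heap object pointers carry kHeapObjectTag
+// (1) in their low bits, so a field at offset 8 of an object whose tagged
+// pointer is 0x1001 is loaded from raw address 0x1001 + 8 - 1 == 0x1008.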
+
+
+Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
+ return raw_assembler_->Load(MachineType::AnyTagged(), context,
+ IntPtrConstant(Context::SlotOffset(slot_index)));
+}
+
+
+Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
+ Node* offset =
+ IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+ return raw_assembler_->Load(MachineType::AnyTagged(), context, offset);
+}
+
+
+Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
+ Node* value) {
+ Node* offset =
+ IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+ return raw_assembler_->Store(MachineRepresentation::kTagged, context, offset,
+ value, kFullWriteBarrier);
+}
+
+
+Node* InterpreterAssembler::LoadTypeFeedbackVector() {
+ Node* function = raw_assembler_->Load(
+ MachineType::AnyTagged(), RegisterFileRawPointer(),
+ IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ Node* shared_info =
+ LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
+ Node* vector =
+ LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
+ return vector;
+}
+
+
+Node* InterpreterAssembler::Projection(int index, Node* node) {
+ return raw_assembler_->Projection(index, node);
+}
+
+
+Node* InterpreterAssembler::CallConstruct(Node* new_target, Node* constructor,
+ Node* first_arg, Node* arg_count) {
+ Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
+
+ Node* code_target = HeapConstant(callable.code());
+
+ Node** args = zone()->NewArray<Node*>(5);
+ args[0] = arg_count;
+ args[1] = new_target;
+ args[2] = constructor;
+ args[3] = first_arg;
+ args[4] = GetContext();
+
+ return CallN(descriptor, code_target, args);
+}
+
+
+void InterpreterAssembler::CallPrologue() {
+ StoreRegister(SmiTag(bytecode_offset_),
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
+}
+
+
+void InterpreterAssembler::CallEpilogue() {
+ // Restore the bytecode offset from the stack frame.
+ bytecode_offset_ = SmiUntag(LoadRegister(
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+}
+
+
+Node* InterpreterAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
+ Node** args) {
+ CallPrologue();
+
+ Node* stack_pointer_before_call = nullptr;
+ if (FLAG_debug_code) {
+ stack_pointer_before_call = raw_assembler_->LoadStackPointer();
+ }
+ Node* return_val = raw_assembler_->CallN(descriptor, code_target, args);
+ if (FLAG_debug_code) {
+ Node* stack_pointer_after_call = raw_assembler_->LoadStackPointer();
+ AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
+ kUnexpectedStackPointer);
+ }
+
+ CallEpilogue();
+ return return_val;
+}
+
+
+Node* InterpreterAssembler::CallJS(Node* function, Node* first_arg,
+ Node* arg_count) {
+ Callable callable = CodeFactory::InterpreterPushArgsAndCall(isolate());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
+
+ Node* code_target = HeapConstant(callable.code());
+
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg_count;
+ args[1] = first_arg;
+ args[2] = function;
+ args[3] = GetContext();
+
+ return CallN(descriptor, code_target, args);
+}
+
+
+Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
+ Node* target, Node** args) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, 0, CallDescriptor::kNoFlags);
+ return CallN(call_descriptor, target, args);
+}
+
+
+Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
+ Node* target, Node* arg1, Node* arg2,
+ Node* arg3) {
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = GetContext();
+ return CallIC(descriptor, target, args);
+}
+
+
+Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
+ Node* target, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4) {
+ Node** args = zone()->NewArray<Node*>(5);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = GetContext();
+ return CallIC(descriptor, target, args);
+}
+
+
+Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
+ Node* target, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5) {
+ Node** args = zone()->NewArray<Node*>(6);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = arg5;
+ args[5] = GetContext();
+ return CallIC(descriptor, target, args);
+}
+
+
+Node* InterpreterAssembler::CallRuntime(Node* function_id, Node* first_arg,
+ Node* arg_count, int result_size) {
+ Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
+ Operator::kNoProperties, MachineType::AnyTagged(), result_size);
+ Node* code_target = HeapConstant(callable.code());
+
+ // Get the function entry from the function id.
+ Node* function_table = raw_assembler_->ExternalConstant(
+ ExternalReference::runtime_function_table_address(isolate()));
+ Node* function_offset = raw_assembler_->Int32Mul(
+ function_id, Int32Constant(sizeof(Runtime::Function)));
+ Node* function = IntPtrAdd(function_table, function_offset);
+ Node* function_entry =
+ raw_assembler_->Load(MachineType::Pointer(), function,
+ Int32Constant(offsetof(Runtime::Function, entry)));
+
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg_count;
+ args[1] = first_arg;
+ args[2] = function_entry;
+ args[3] = GetContext();
+
+ return CallN(descriptor, code_target, args);
+}
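+
+// Worked example of the entry lookup above (illustrative): the runtime
+// function with id k starts at function_table + k * sizeof(Runtime::Function),
+// and its code address is the |entry| field loaded from that record.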
+
+
+Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* arg1) {
+ CallPrologue();
+ Node* return_val =
+ raw_assembler_->CallRuntime1(function_id, arg1, GetContext());
+ CallEpilogue();
+ return return_val;
+}
+
+
+Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* arg1, Node* arg2) {
+ CallPrologue();
+ Node* return_val =
+ raw_assembler_->CallRuntime2(function_id, arg1, arg2, GetContext());
+ CallEpilogue();
+ return return_val;
+}
+
+
+Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4) {
+ CallPrologue();
+ Node* return_val = raw_assembler_->CallRuntime4(function_id, arg1, arg2, arg3,
+ arg4, GetContext());
+ CallEpilogue();
+ return return_val;
+}
+
+
+void InterpreterAssembler::Return() {
+ Node* exit_trampoline_code_object =
+ HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
+  // If the order of the parameters changes, you need to change the call
+  // signature below.
+ STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
+ STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
+ STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
+ STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
+ STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
+ STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
+ Node* args[] = { GetAccumulator(),
+ RegisterFileRawPointer(),
+ BytecodeOffset(),
+ BytecodeArrayTaggedPointer(),
+ DispatchTableRawPointer(),
+ GetContext() };
+ raw_assembler_->TailCallN(call_descriptor(), exit_trampoline_code_object,
+ args);
+}
+
+
+Node* InterpreterAssembler::Advance(int delta) {
+ return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
+}
+
+
+Node* InterpreterAssembler::Advance(Node* delta) {
+ return raw_assembler_->IntPtrAdd(BytecodeOffset(), delta);
+}
+
+
+void InterpreterAssembler::Jump(Node* delta) { DispatchTo(Advance(delta)); }
+
+
+void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
+ RawMachineLabel match, no_match;
+ Node* condition = raw_assembler_->WordEqual(lhs, rhs);
+ raw_assembler_->Branch(condition, &match, &no_match);
+ raw_assembler_->Bind(&match);
+ DispatchTo(Advance(delta));
+ raw_assembler_->Bind(&no_match);
+ Dispatch();
+}
+
+
+void InterpreterAssembler::Dispatch() {
+ DispatchTo(Advance(interpreter::Bytecodes::Size(bytecode_)));
+}
+
+
+void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
+ Node* target_bytecode = raw_assembler_->Load(
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
+
+ // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
+ // from code object on every dispatch.
+ Node* target_code_object = raw_assembler_->Load(
+ MachineType::Pointer(), DispatchTableRawPointer(),
+ raw_assembler_->Word32Shl(target_bytecode,
+ Int32Constant(kPointerSizeLog2)));
+
+  // If the order of the parameters changes, you need to change the call
+  // signature below.
+ STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
+ STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
+ STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
+ STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
+ STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
+ STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
+ Node* args[] = { GetAccumulator(),
+ RegisterFileRawPointer(),
+ new_bytecode_offset,
+ BytecodeArrayTaggedPointer(),
+ DispatchTableRawPointer(),
+ GetContext() };
+ raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
+}
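+
+// A sketch of the dispatch scheme (illustrative): the dispatch table maps a
+// bytecode value to a handler entry point, so each handler ends by
+// tail-calling the next one directly:
+//
+//   target = dispatch_table[bytecode_array[new_offset]];
+//   tail-call target(accumulator, register_file, new_offset, ...);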
+
+
+void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
+ Node* abort_id = SmiTag(Int32Constant(bailout_reason));
+ Node* ret_value = CallRuntime(Runtime::kAbort, abort_id);
+  // Unreached, but keeps TurboFan happy.
+ raw_assembler_->Return(ret_value);
+}
+
+
+void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
+ BailoutReason bailout_reason) {
+ RawMachineLabel match, no_match;
+ Node* condition = raw_assembler_->WordEqual(lhs, rhs);
+ raw_assembler_->Branch(condition, &match, &no_match);
+ raw_assembler_->Bind(&no_match);
+ Abort(bailout_reason);
+ raw_assembler_->Bind(&match);
+}
+
+
+// static
+bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ return false;
+#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
+ return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
+ return true;
+#else
+#error "Unknown Architecture"
+#endif
+}
+
+
+// RawMachineAssembler delegate helpers:
+Isolate* InterpreterAssembler::isolate() { return raw_assembler_->isolate(); }
+
+
+Graph* InterpreterAssembler::graph() { return raw_assembler_->graph(); }
+
+
+CallDescriptor* InterpreterAssembler::call_descriptor() const {
+ return raw_assembler_->call_descriptor();
+}
+
+
+Zone* InterpreterAssembler::zone() { return raw_assembler_->zone(); }
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/interpreter-assembler.h b/src/compiler/interpreter-assembler.h
new file mode 100644
index 0000000..fb79d3e
--- /dev/null
+++ b/src/compiler/interpreter-assembler.h
@@ -0,0 +1,224 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INTERPRETER_ASSEMBLER_H_
+#define V8_COMPILER_INTERPRETER_ASSEMBLER_H_
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/allocation.h"
+#include "src/base/smart-pointers.h"
+#include "src/builtins.h"
+#include "src/frames.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+class CallInterfaceDescriptor;
+class Isolate;
+class Zone;
+
+namespace compiler {
+
+class CallDescriptor;
+class Graph;
+class Node;
+class Operator;
+class RawMachineAssembler;
+class Schedule;
+
+class InterpreterAssembler {
+ public:
+ InterpreterAssembler(Isolate* isolate, Zone* zone,
+ interpreter::Bytecode bytecode);
+ virtual ~InterpreterAssembler();
+
+ Handle<Code> GenerateCode();
+
+ // Returns the count immediate for bytecode operand |operand_index| in the
+ // current bytecode.
+ Node* BytecodeOperandCount(int operand_index);
+ // Returns the index immediate for bytecode operand |operand_index| in the
+ // current bytecode.
+ Node* BytecodeOperandIdx(int operand_index);
+ // Returns the Imm8 immediate for bytecode operand |operand_index| in the
+ // current bytecode.
+ Node* BytecodeOperandImm(int operand_index);
+ // Returns the register index for bytecode operand |operand_index| in the
+ // current bytecode.
+ Node* BytecodeOperandReg(int operand_index);
+
+ // Accumulator.
+ Node* GetAccumulator();
+ void SetAccumulator(Node* value);
+
+ // Context.
+ Node* GetContext();
+ void SetContext(Node* value);
+
+ // Loads from and stores to the interpreter register file.
+ Node* LoadRegister(int offset);
+ Node* LoadRegister(interpreter::Register reg);
+ Node* LoadRegister(Node* reg_index);
+ Node* StoreRegister(Node* value, int offset);
+ Node* StoreRegister(Node* value, interpreter::Register reg);
+ Node* StoreRegister(Node* value, Node* reg_index);
+
+ // Returns the next consecutive register.
+ Node* NextRegister(Node* reg_index);
+
+ // Returns the location in memory of the register |reg_index| in the
+ // interpreter register file.
+ Node* RegisterLocation(Node* reg_index);
+
+ // Constants.
+ Node* Int32Constant(int value);
+ Node* IntPtrConstant(intptr_t value);
+ Node* NumberConstant(double value);
+ Node* HeapConstant(Handle<HeapObject> object);
+ Node* BooleanConstant(bool value);
+
+ // Tag and untag Smi values.
+ Node* SmiTag(Node* value);
+ Node* SmiUntag(Node* value);
+
+ // Basic arithmetic operations.
+ Node* IntPtrAdd(Node* a, Node* b);
+ Node* IntPtrSub(Node* a, Node* b);
+ Node* WordShl(Node* value, int shift);
+
+ // Load constant at |index| in the constant pool.
+ Node* LoadConstantPoolEntry(Node* index);
+
+ // Load an element from a fixed array on the heap.
+ Node* LoadFixedArrayElement(Node* fixed_array, int index);
+
+ // Load a field from an object on the heap.
+ Node* LoadObjectField(Node* object, int offset);
+
+ // Load |slot_index| from |context|.
+ Node* LoadContextSlot(Node* context, int slot_index);
+ Node* LoadContextSlot(Node* context, Node* slot_index);
+ // Stores |value| into |slot_index| of |context|.
+ Node* StoreContextSlot(Node* context, Node* slot_index, Node* value);
+
+ // Load the TypeFeedbackVector for the current function.
+ Node* LoadTypeFeedbackVector();
+
+  // Projects the output value at index |index| of |node|.
+ Node* Projection(int index, Node* node);
+
+  // Call constructor |constructor| with |arg_count| arguments (not including
+  // receiver) and the first argument located at |first_arg|. The |new_target|
+  // is the same as |constructor| for calls via the new keyword, but differs
+  // for super calls.
+ Node* CallConstruct(Node* new_target, Node* constructor, Node* first_arg,
+ Node* arg_count);
+
+ // Call JSFunction or Callable |function| with |arg_count|
+ // arguments (not including receiver) and the first argument
+ // located at |first_arg|.
+ Node* CallJS(Node* function, Node* first_arg, Node* arg_count);
+
+ // Call an IC code stub.
+ Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
+ Node* arg2, Node* arg3);
+ Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4);
+ Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4, Node* arg5);
+
+ // Call runtime function.
+ Node* CallRuntime(Node* function_id, Node* first_arg, Node* arg_count,
+ int return_size = 1);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4);
+
+ // Jump relative to the current bytecode by |jump_offset|.
+ void Jump(Node* jump_offset);
+
+ // Jump relative to the current bytecode by |jump_offset| if the
+ // word values |lhs| and |rhs| are equal.
+ void JumpIfWordEqual(Node* lhs, Node* rhs, Node* jump_offset);
+
+ // Returns from the function.
+ void Return();
+
+  // Dispatch to the next bytecode.
+ void Dispatch();
+
+ // Abort with the given bailout reason.
+ void Abort(BailoutReason bailout_reason);
+
+ protected:
+ static bool TargetSupportsUnalignedAccess();
+
+ // Protected helpers (for testing) which delegate to RawMachineAssembler.
+ CallDescriptor* call_descriptor() const;
+ Graph* graph();
+
+ private:
+  // Returns a raw pointer to the start of the register file on the stack.
+ Node* RegisterFileRawPointer();
+ // Returns a tagged pointer to the current function's BytecodeArray object.
+ Node* BytecodeArrayTaggedPointer();
+ // Returns the offset from the BytecodeArrayPointer of the current bytecode.
+ Node* BytecodeOffset();
+  // Returns a raw pointer to the first entry in the interpreter dispatch
+  // table.
+ Node* DispatchTableRawPointer();
+
+  // Saves the interpreter bytecode offset to the interpreter stack frame
+  // before performing a call and restores it afterwards.
+ void CallPrologue();
+ void CallEpilogue();
+
+  // Returns the offset of register |index| relative to
+  // RegisterFileRawPointer().
+ Node* RegisterFrameOffset(Node* index);
+
+ Node* SmiShiftBitsConstant();
+ Node* BytecodeOperand(int operand_index);
+ Node* BytecodeOperandSignExtended(int operand_index);
+ Node* BytecodeOperandShort(int operand_index);
+ Node* BytecodeOperandShortSignExtended(int operand_index);
+
+ Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+ Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node** args);
+
+  // Returns BytecodeOffset() advanced by |delta| bytecodes. Note: this does
+  // not update BytecodeOffset() itself.
+ Node* Advance(int delta);
+ Node* Advance(Node* delta);
+
+ // Starts next instruction dispatch at |new_bytecode_offset|.
+ void DispatchTo(Node* new_bytecode_offset);
+
+ // Abort operations for debug code.
+ void AbortIfWordNotEqual(Node* lhs, Node* rhs, BailoutReason bailout_reason);
+
+ // Private helpers which delegate to RawMachineAssembler.
+ Isolate* isolate();
+ Zone* zone();
+
+ interpreter::Bytecode bytecode_;
+ base::SmartPointer<RawMachineAssembler> raw_assembler_;
+
+ Node* accumulator_;
+ Node* bytecode_offset_;
+ Node* context_;
+
+ bool code_generated_;
+
+ DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INTERPRETER_ASSEMBLER_H_
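
The public interface above is everything a bytecode-handler generator needs.
A minimal sketch of a client, assuming a hypothetical kAdd bytecode whose
register operand holds the left operand, with the right operand in the
accumulator, and a Runtime::kAdd intrinsic (the bytecode, operand layout and
runtime function are assumptions for illustration, not part of this patch):

    Handle<Code> GenerateAddHandler(Isolate* isolate, Zone* zone) {
      InterpreterAssembler assembler(isolate, zone, interpreter::Bytecode::kAdd);
      Node* reg_index = assembler.BytecodeOperandReg(0);  // operand 0: a register
      Node* lhs = assembler.LoadRegister(reg_index);
      Node* rhs = assembler.GetAccumulator();
      Node* result = assembler.CallRuntime(Runtime::kAdd, lhs, rhs);
      assembler.SetAccumulator(result);
      assembler.Dispatch();  // tail-calls the next bytecode's handler
      return assembler.GenerateCode();
    }
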
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 263b0fe..a7a7da5 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/diamond.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/objects-inl.h"
#include "src/types.h"
namespace v8 {
@@ -15,17 +15,6 @@
namespace compiler {
-// Helper method that assumes replacement nodes are pure values that don't
-// produce an effect. Replaces {node} with {reduction} and relaxes effects.
-static Reduction ReplaceWithPureReduction(Node* node, Reduction reduction) {
- if (reduction.Changed()) {
- NodeProperties::ReplaceWithValue(node, reduction.replacement());
- return reduction;
- }
- return Reducer::NoChange();
-}
-
-
// Helper class to access JSCallFunction nodes that are potential candidates
// for reduction when they have a BuiltinFunctionId associated with them.
class JSCallReduction {
@@ -36,17 +25,17 @@
// constant callee being a well-known builtin with a BuiltinFunctionId.
bool HasBuiltinFunctionId() {
if (node_->opcode() != IrOpcode::kJSCallFunction) return false;
- HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
- if (!m.HasValue() || !m.Value().handle()->IsJSFunction()) return false;
- Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+ HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
+ if (!m.HasValue() || !m.Value()->IsJSFunction()) return false;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
return function->shared()->HasBuiltinFunctionId();
}
// Retrieves the BuiltinFunctionId as described above.
BuiltinFunctionId GetBuiltinFunctionId() {
DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
- HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
- Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+ HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
return function->shared()->builtin_function_id();
}
@@ -56,20 +45,20 @@
// Determines whether the call takes one input of the given type.
bool InputsMatchOne(Type* t1) {
return GetJSCallArity() == 1 &&
- NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1);
+ NodeProperties::GetType(GetJSCallInput(0))->Is(t1);
}
// Determines whether the call takes two inputs of the given types.
bool InputsMatchTwo(Type* t1, Type* t2) {
return GetJSCallArity() == 2 &&
- NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1) &&
- NodeProperties::GetBounds(GetJSCallInput(1)).upper->Is(t2);
+ NodeProperties::GetType(GetJSCallInput(0))->Is(t1) &&
+ NodeProperties::GetType(GetJSCallInput(1))->Is(t2);
}
// Determines whether the call takes inputs all of the given type.
bool InputsMatchAll(Type* t) {
for (int i = 0; i < GetJSCallArity(); i++) {
- if (!NodeProperties::GetBounds(GetJSCallInput(i)).upper->Is(t)) {
+ if (!NodeProperties::GetType(GetJSCallInput(i))->Is(t)) {
return false;
}
}
@@ -97,40 +86,8 @@
};
-JSBuiltinReducer::JSBuiltinReducer(JSGraph* jsgraph)
- : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
-
-
-// ECMA-262, section 15.8.2.1.
-Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Unsigned32())) {
- // Math.abs(a:uint32) -> a
- return Replace(r.left());
- }
- if (r.InputsMatchOne(Type::Number())) {
- // Math.abs(a:number) -> (a > 0 ? a : 0 - a)
- Node* const value = r.left();
- Node* const zero = jsgraph()->ZeroConstant();
- return Replace(graph()->NewNode(
- common()->Select(kMachNone),
- graph()->NewNode(simplified()->NumberLessThan(), zero, value), value,
- graph()->NewNode(simplified()->NumberSubtract(), zero, value)));
- }
- return NoChange();
-}
-
-
-// ECMA-262, section 15.8.2.17.
-Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.sqrt(a:number) -> Float64Sqrt(a)
- Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
+JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph)
+ : AdvancedReducer(editor), jsgraph_(jsgraph) {}
// ECMA-262, section 15.8.2.11.
@@ -150,9 +107,9 @@
for (int i = 1; i < r.GetJSCallArity(); i++) {
Node* const input = r.GetJSCallInput(i);
value = graph()->NewNode(
- common()->Select(kMachNone),
- graph()->NewNode(simplified()->NumberLessThan(), input, value), input,
- value);
+ common()->Select(MachineRepresentation::kNone),
+ graph()->NewNode(simplified()->NumberLessThan(), input, value), value,
+ input);
}
return Replace(value);
}
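
The reordered Select arms are the substance of this hunk: with
NumberLessThan(input, value) as the condition, keeping value on true and
taking input on false yields a running maximum (the old ordering selected the
smaller operand). A plain-C++ model, assuming the seed value comes from the
first call input as in the surrounding function (not shown in the hunk):

    // Model of the reduced Math.max over the (integral) call inputs.
    double MathMaxModel(const double* args, int arity) {
      double value = args[0];  // seed from input 0
      for (int i = 1; i < arity; ++i) {
        // Select(args[i] < value, value, args[i]) == max(value, args[i])
        value = (args[i] < value) ? value : args[i];
      }
      return value;
    }
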
@@ -185,62 +142,40 @@
}
-// ES6 draft 10-14-14, section 20.2.2.16.
-Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
- if (!machine()->HasFloat64Floor()) return NoChange();
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.floor(a:number) -> Float64Floor(a)
- Node* value = graph()->NewNode(machine()->Float64Floor(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-
-// ES6 draft 10-14-14, section 20.2.2.10.
-Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
- if (!machine()->HasFloat64Ceil()) return NoChange();
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.ceil(a:number) -> Float64Ceil(a)
- Node* value = graph()->NewNode(machine()->Float64Ceil(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-
Reduction JSBuiltinReducer::Reduce(Node* node) {
+ Reduction reduction = NoChange();
JSCallReduction r(node);
// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
- case kMathAbs:
- return ReplaceWithPureReduction(node, ReduceMathAbs(node));
- case kMathSqrt:
- return ReplaceWithPureReduction(node, ReduceMathSqrt(node));
case kMathMax:
- return ReplaceWithPureReduction(node, ReduceMathMax(node));
+ reduction = ReduceMathMax(node);
+ break;
case kMathImul:
- return ReplaceWithPureReduction(node, ReduceMathImul(node));
+ reduction = ReduceMathImul(node);
+ break;
case kMathFround:
- return ReplaceWithPureReduction(node, ReduceMathFround(node));
- case kMathFloor:
- return ReplaceWithPureReduction(node, ReduceMathFloor(node));
- case kMathCeil:
- return ReplaceWithPureReduction(node, ReduceMathCeil(node));
+ reduction = ReduceMathFround(node);
+ break;
default:
break;
}
- return NoChange();
+
+  // Replace the builtin call, assuming replacement nodes are pure values that
+  // don't produce an effect: replace {node} with the replacement value of
+  // {reduction} and relax effects.
+ if (reduction.Changed()) ReplaceWithValue(node, reduction.replacement());
+
+ return reduction;
}
Graph* JSBuiltinReducer::graph() const { return jsgraph()->graph(); }
+Isolate* JSBuiltinReducer::isolate() const { return jsgraph()->isolate(); }
+
+
CommonOperatorBuilder* JSBuiltinReducer::common() const {
return jsgraph()->common();
}
@@ -250,6 +185,11 @@
return jsgraph()->machine();
}
+
+SimplifiedOperatorBuilder* JSBuiltinReducer::simplified() const {
+ return jsgraph()->simplified();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index ac6f266..cfacdc1 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -6,7 +6,6 @@
#define V8_COMPILER_JS_BUILTIN_REDUCER_H_
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
@@ -16,32 +15,30 @@
class CommonOperatorBuilder;
class JSGraph;
class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
-class JSBuiltinReducer FINAL : public Reducer {
+class JSBuiltinReducer final : public AdvancedReducer {
public:
- explicit JSBuiltinReducer(JSGraph* jsgraph);
- ~JSBuiltinReducer() FINAL {}
+ explicit JSBuiltinReducer(Editor* editor, JSGraph* jsgraph);
+ ~JSBuiltinReducer() final {}
- Reduction Reduce(Node* node) FINAL;
+ Reduction Reduce(Node* node) final;
private:
- Reduction ReduceMathAbs(Node* node);
- Reduction ReduceMathSqrt(Node* node);
+ Reduction ReduceFunctionCall(Node* node);
Reduction ReduceMathMax(Node* node);
Reduction ReduceMathImul(Node* node);
Reduction ReduceMathFround(Node* node);
- Reduction ReduceMathFloor(Node* node);
- Reduction ReduceMathCeil(Node* node);
- JSGraph* jsgraph() const { return jsgraph_; }
Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ SimplifiedOperatorBuilder* simplified() const;
JSGraph* jsgraph_;
- SimplifiedOperatorBuilder simplified_;
};
} // namespace compiler
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
new file mode 100644
index 0000000..a15d6fd
--- /dev/null
+++ b/src/compiler/js-call-reducer.cc
@@ -0,0 +1,557 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-call-reducer.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+#include "src/objects-inl.h"
+#include "src/type-feedback-vector-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+VectorSlotPair CallCountFeedback(VectorSlotPair p) {
+ // Extract call count from {p}.
+ if (!p.IsValid()) return VectorSlotPair();
+ CallICNexus n(p.vector(), p.slot());
+ int const call_count = n.ExtractCallCount();
+ if (call_count <= 0) return VectorSlotPair();
+
+ // Create megamorphic CallIC feedback with the given {call_count}.
+ StaticFeedbackVectorSpec spec;
+ FeedbackVectorSlot slot = spec.AddCallICSlot();
+ Handle<TypeFeedbackMetadata> metadata =
+ TypeFeedbackMetadata::New(n.GetIsolate(), &spec);
+ Handle<TypeFeedbackVector> vector =
+ TypeFeedbackVector::New(n.GetIsolate(), metadata);
+ CallICNexus nexus(vector, slot);
+ nexus.ConfigureMegamorphic(call_count);
+ return VectorSlotPair(vector, slot);
+}
+
+} // namespace
+
+
+Reduction JSCallReducer::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCallConstruct:
+ return ReduceJSCallConstruct(node);
+ case IrOpcode::kJSCallFunction:
+ return ReduceJSCallFunction(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+// ES6 section 22.1.1 The Array Constructor
+Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+
+ // Check if we have an allocation site from the CallIC.
+ Handle<AllocationSite> site;
+ if (p.feedback().IsValid()) {
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ Handle<Object> feedback(nexus.GetFeedback(), isolate());
+ if (feedback->IsAllocationSite()) {
+ site = Handle<AllocationSite>::cast(feedback);
+ }
+ }
+
+ // Turn the {node} into a {JSCreateArray} call.
+ DCHECK_LE(2u, p.arity());
+ size_t const arity = p.arity() - 2;
+ NodeProperties::ReplaceValueInput(node, target, 0);
+ NodeProperties::ReplaceValueInput(node, target, 1);
+ NodeProperties::RemoveFrameStateInput(node, 1);
+ // TODO(bmeurer): We might need to propagate the tail call mode to
+ // the JSCreateArray operator, because an Array call in tail call
+ // position must always properly consume the parent stack frame.
+ NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
+ return Changed(node);
+}
+
+
+// ES6 section 20.1.1 The Number Constructor
+Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+
+ // Turn the {node} into a {JSToNumber} call.
+ DCHECK_LE(2u, p.arity());
+ Node* value = (p.arity() == 2) ? jsgraph()->ZeroConstant()
+ : NodeProperties::GetValueInput(node, 2);
+ NodeProperties::RemoveFrameStateInput(node, 1);
+ NodeProperties::ReplaceValueInputs(node, value);
+ NodeProperties::ChangeOp(node, javascript()->ToNumber());
+ return Changed(node);
+}
+
+
+// ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
+Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ Handle<JSFunction> apply =
+ Handle<JSFunction>::cast(HeapObjectMatcher(target).Value());
+ size_t arity = p.arity();
+ DCHECK_LE(2u, arity);
+ ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny;
+ if (arity == 2) {
+ // Neither thisArg nor argArray was provided.
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
+ node->ReplaceInput(0, node->InputAt(1));
+ node->ReplaceInput(1, jsgraph()->UndefinedConstant());
+ } else if (arity == 3) {
+ // The argArray was not provided, just remove the {target}.
+ node->RemoveInput(0);
+ --arity;
+ } else if (arity == 4) {
+ // Check if argArray is an arguments object, and {node} is the only value
+ // user of argArray (except for value uses in frame states).
+ Node* arg_array = NodeProperties::GetValueInput(node, 3);
+ if (arg_array->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
+ for (Edge edge : arg_array->use_edges()) {
+ if (edge.from()->opcode() == IrOpcode::kStateValues) continue;
+ if (!NodeProperties::IsValueEdge(edge)) continue;
+ if (edge.from() == node) continue;
+ return NoChange();
+ }
+ // Get to the actual frame state from which to extract the arguments;
+ // we can only optimize this in case the {node} was already inlined into
+ // some other function (and same for the {arg_array}).
+ CreateArgumentsParameters const& p =
+ CreateArgumentsParametersOf(arg_array->op());
+ Node* frame_state = NodeProperties::GetFrameStateInput(arg_array, 0);
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
+ FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
+ if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
+ // Need to take the parameters from the arguments adaptor.
+ frame_state = outer_state;
+ }
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ if (p.type() == CreateArgumentsParameters::kMappedArguments) {
+ // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ if (shared->internal_formal_parameter_count() != 0) return NoChange();
+ }
+ // Remove the argArray input from the {node}.
+ node->RemoveInput(static_cast<int>(--arity));
+ // Add the actual parameters to the {node}, skipping the receiver.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ for (int i = p.start_index() + 1; i < state_info.parameter_count(); ++i) {
+ node->InsertInput(graph()->zone(), static_cast<int>(arity),
+ parameters->InputAt(i));
+ ++arity;
+ }
+ // Drop the {target} from the {node}.
+ node->RemoveInput(0);
+ --arity;
+ } else {
+ return NoChange();
+ }
+ // Change {node} to the new {JSCallFunction} operator.
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, p.language_mode(),
+ CallCountFeedback(p.feedback()),
+ convert_mode, p.tail_call_mode()));
+ // Change context of {node} to the Function.prototype.apply context,
+ // to ensure any exception is thrown in the correct context.
+ NodeProperties::ReplaceContextInput(
+ node, jsgraph()->HeapConstant(handle(apply->context(), isolate())));
+ // Try to further reduce the JSCallFunction {node}.
+ Reduction const reduction = ReduceJSCallFunction(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
+
+
+// ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
+Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ Handle<JSFunction> call = Handle<JSFunction>::cast(
+ HeapObjectMatcher(NodeProperties::GetValueInput(node, 0)).Value());
+ // Change context of {node} to the Function.prototype.call context,
+ // to ensure any exception is thrown in the correct context.
+ NodeProperties::ReplaceContextInput(
+ node, jsgraph()->HeapConstant(handle(call->context(), isolate())));
+  // Remove the target from {node} and use the receiver as target instead, and
+  // the thisArg becomes the new receiver. If thisArg was not provided, use
+  // undefined instead.
+ size_t arity = p.arity();
+ DCHECK_LE(2u, arity);
+ ConvertReceiverMode convert_mode;
+ if (arity == 2) {
+ // The thisArg was not provided, use undefined as receiver.
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
+ node->ReplaceInput(0, node->InputAt(1));
+ node->ReplaceInput(1, jsgraph()->UndefinedConstant());
+ } else {
+ // Just remove the target, which is the first value input.
+ convert_mode = ConvertReceiverMode::kAny;
+ node->RemoveInput(0);
+ --arity;
+ }
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, p.language_mode(),
+ CallCountFeedback(p.feedback()),
+ convert_mode, p.tail_call_mode()));
+ // Try to further reduce the JSCallFunction {node}.
+ Reduction const reduction = ReduceJSCallFunction(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
+
+
+Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Try to specialize JSCallFunction {node}s with constant {target}s.
+ HeapObjectMatcher m(target);
+ if (m.HasValue()) {
+ if (m.Value()->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ // Raise a TypeError if the {target} is a "classConstructor".
+ if (IsClassConstructor(shared->kind())) {
+ NodeProperties::RemoveFrameStateInput(node, 0);
+ NodeProperties::ReplaceValueInputs(node, target);
+ NodeProperties::ChangeOp(
+ node, javascript()->CallRuntime(
+ Runtime::kThrowConstructorNonCallableError, 1));
+ return Changed(node);
+ }
+
+ // Check for known builtin functions.
+ if (shared->HasBuiltinFunctionId()) {
+ switch (shared->builtin_function_id()) {
+ case kFunctionApply:
+ return ReduceFunctionPrototypeApply(node);
+ case kFunctionCall:
+ return ReduceFunctionPrototypeCall(node);
+ default:
+ break;
+ }
+ }
+
+ // Check for the Array constructor.
+ if (*function == function->native_context()->array_function()) {
+ return ReduceArrayConstructor(node);
+ }
+
+ // Check for the Number constructor.
+ if (*function == function->native_context()->number_function()) {
+ return ReduceNumberConstructor(node);
+ }
+ } else if (m.Value()->IsJSBoundFunction()) {
+ Handle<JSBoundFunction> function =
+ Handle<JSBoundFunction>::cast(m.Value());
+ Handle<JSReceiver> bound_target_function(
+ function->bound_target_function(), isolate());
+ Handle<Object> bound_this(function->bound_this(), isolate());
+ Handle<FixedArray> bound_arguments(function->bound_arguments(),
+ isolate());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ ConvertReceiverMode const convert_mode =
+ (bound_this->IsNull() || bound_this->IsUndefined())
+ ? ConvertReceiverMode::kNullOrUndefined
+ : ConvertReceiverMode::kNotNullOrUndefined;
+ size_t arity = p.arity();
+ DCHECK_LE(2u, arity);
+ // Patch {node} to use [[BoundTargetFunction]] and [[BoundThis]].
+ NodeProperties::ReplaceValueInput(
+ node, jsgraph()->Constant(bound_target_function), 0);
+ NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(bound_this),
+ 1);
+ // Insert the [[BoundArguments]] for {node}.
+ for (int i = 0; i < bound_arguments->length(); ++i) {
+ node->InsertInput(
+ graph()->zone(), i + 2,
+ jsgraph()->Constant(handle(bound_arguments->get(i), isolate())));
+ arity++;
+ }
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, p.language_mode(),
+ CallCountFeedback(p.feedback()),
+ convert_mode, p.tail_call_mode()));
+ // Try to further reduce the JSCallFunction {node}.
+ Reduction const reduction = ReduceJSCallFunction(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+
+ // Don't mess with other {node}s that have a constant {target}.
+ // TODO(bmeurer): Also support proxies here.
+ return NoChange();
+ }
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // Extract feedback from the {node} using the CallICNexus.
+ if (!p.feedback().IsValid()) return NoChange();
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ Handle<Object> feedback(nexus.GetFeedback(), isolate());
+ if (feedback->IsAllocationSite()) {
+ // Retrieve the Array function from the {node}.
+ Node* array_function;
+ Handle<Context> native_context;
+ if (GetNativeContext(node).ToHandle(&native_context)) {
+ array_function = jsgraph()->HeapConstant(
+ handle(native_context->array_function(), isolate()));
+ } else {
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ array_function = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ARRAY_FUNCTION_INDEX, true),
+ native_context, native_context, effect);
+ }
+
+ // Check that the {target} is still the {array_function}.
+ Node* check = effect =
+ graph()->NewNode(javascript()->StrictEqual(), target, array_function,
+ context, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Turn the {node} into a {JSCreateArray} call.
+ NodeProperties::ReplaceValueInput(node, array_function, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+ return ReduceArrayConstructor(node);
+ } else if (feedback->IsWeakCell()) {
+ Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
+ if (cell->value()->IsJSFunction()) {
+ Node* target_function =
+ jsgraph()->Constant(handle(cell->value(), isolate()));
+
+ // Check that the {target} is still the {target_function}.
+ Node* check = effect =
+ graph()->NewNode(javascript()->StrictEqual(), target, target_function,
+ context, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Specialize the JSCallFunction node to the {target_function}.
+ NodeProperties::ReplaceValueInput(node, target_function, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+
+ // Try to further reduce the JSCallFunction {node}.
+ Reduction const reduction = ReduceJSCallFunction(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ return NoChange();
+}
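
Both feedback paths above end in the same speculative shape: strict-compare
the dynamic target against the feedback value, deoptimize eagerly on the
false branch, and specialize the call on the true branch. A plain-C++
caricature of that control flow, with stand-in types rather than V8 API:

    #include <cstdlib>

    struct Callee {};  // stand-in for the JSFunction from the feedback

    [[noreturn]] void DeoptimizeEagerly() { std::abort(); }  // Deoptimize(kEager)

    Callee* GuardedTarget(Callee* target, Callee* feedback_target) {
      if (target != feedback_target) DeoptimizeEagerly();  // the IfFalse edge
      return feedback_target;  // on the IfTrue edge the callee is a constant
    }
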
+
+
+Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
+ CallConstructParameters const& p = CallConstructParametersOf(node->op());
+ DCHECK_LE(2u, p.arity());
+ int const arity = static_cast<int>(p.arity() - 2);
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Try to specialize JSCallConstruct {node}s with constant {target}s.
+ HeapObjectMatcher m(target);
+ if (m.HasValue()) {
+ if (m.Value()->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+
+ // Raise a TypeError if the {target} is not a constructor.
+ if (!function->IsConstructor()) {
+ // Drop the lazy bailout location and use the eager bailout point for
+ // the runtime function (actually as lazy bailout point). It doesn't
+ // really matter which bailout location we use since we never really
+ // go back after throwing the exception.
+ NodeProperties::RemoveFrameStateInput(node, 0);
+ NodeProperties::ReplaceValueInputs(node, target);
+ NodeProperties::ChangeOp(
+ node,
+ javascript()->CallRuntime(Runtime::kThrowCalledNonCallable, 1));
+ return Changed(node);
+ }
+
+ // Check for the ArrayConstructor.
+ if (*function == function->native_context()->array_function()) {
+ // Check if we have an allocation site.
+ Handle<AllocationSite> site;
+ if (p.feedback().IsValid()) {
+ Handle<Object> feedback(
+ p.feedback().vector()->Get(p.feedback().slot()), isolate());
+ if (feedback->IsAllocationSite()) {
+ site = Handle<AllocationSite>::cast(feedback);
+ }
+ }
+
+ // Turn the {node} into a {JSCreateArray} call.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+ for (int i = arity; i > 0; --i) {
+ NodeProperties::ReplaceValueInput(
+ node, NodeProperties::GetValueInput(node, i), i + 1);
+ }
+ NodeProperties::ReplaceValueInput(node, new_target, 1);
+ NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
+ return Changed(node);
+ }
+ }
+
+ // Don't mess with other {node}s that have a constant {target}.
+ // TODO(bmeurer): Also support optimizing bound functions and proxies here.
+ return NoChange();
+ }
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // TODO(mvstanton): Use ConstructICNexus here, once available.
+ Handle<Object> feedback;
+ if (!p.feedback().IsValid()) return NoChange();
+ feedback = handle(p.feedback().vector()->Get(p.feedback().slot()), isolate());
+ if (feedback->IsAllocationSite()) {
+ // The feedback is an AllocationSite, which means we have called the
+ // Array function and collected transition (and pretenuring) feedback
+ // for the resulting arrays. This has to be kept in sync with the
+ // implementation of the CallConstructStub.
+ Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
+
+ // Retrieve the Array function from the {node}.
+ Node* array_function;
+ Handle<Context> native_context;
+ if (GetNativeContext(node).ToHandle(&native_context)) {
+ array_function = jsgraph()->HeapConstant(
+ handle(native_context->array_function(), isolate()));
+ } else {
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ array_function = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ARRAY_FUNCTION_INDEX, true),
+ native_context, native_context, effect);
+ }
+
+ // Check that the {target} is still the {array_function}.
+ Node* check = effect =
+ graph()->NewNode(javascript()->StrictEqual(), target, array_function,
+ context, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Turn the {node} into a {JSCreateArray} call.
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+ NodeProperties::RemoveFrameStateInput(node, 1);
+ for (int i = arity; i > 0; --i) {
+ NodeProperties::ReplaceValueInput(
+ node, NodeProperties::GetValueInput(node, i), i + 1);
+ }
+ NodeProperties::ReplaceValueInput(node, new_target, 1);
+ NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
+ return Changed(node);
+ } else if (feedback->IsWeakCell()) {
+ Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
+ if (cell->value()->IsJSFunction()) {
+ Node* target_function =
+ jsgraph()->Constant(handle(cell->value(), isolate()));
+
+ // Check that the {target} is still the {target_function}.
+ Node* check = effect =
+ graph()->NewNode(javascript()->StrictEqual(), target, target_function,
+ context, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Specialize the JSCallConstruct node to the {target_function}.
+ NodeProperties::ReplaceValueInput(node, target_function, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+ if (target == new_target) {
+ NodeProperties::ReplaceValueInput(node, target_function, arity + 1);
+ }
+
+ // Try to further reduce the JSCallConstruct {node}.
+ Reduction const reduction = ReduceJSCallConstruct(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+
+ return NoChange();
+}
+
+
+MaybeHandle<Context> JSCallReducer::GetNativeContext(Node* node) {
+ Node* const context = NodeProperties::GetContextInput(node);
+ return NodeProperties::GetSpecializationNativeContext(context,
+ native_context());
+}
+
+
+Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
+
+
+Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
+
+
+CommonOperatorBuilder* JSCallReducer::common() const {
+ return jsgraph()->common();
+}
+
+
+JSOperatorBuilder* JSCallReducer::javascript() const {
+ return jsgraph()->javascript();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/js-call-reducer.h b/src/compiler/js-call-reducer.h
new file mode 100644
index 0000000..9ffae15
--- /dev/null
+++ b/src/compiler/js-call-reducer.h
@@ -0,0 +1,67 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_CALL_REDUCER_H_
+#define V8_COMPILER_JS_CALL_REDUCER_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+
+
+// Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
+// which might allow inlining or other optimizations to be performed afterwards.
+class JSCallReducer final : public Reducer {
+ public:
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSCallReducer(JSGraph* jsgraph, Flags flags,
+ MaybeHandle<Context> native_context)
+ : jsgraph_(jsgraph), flags_(flags), native_context_(native_context) {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceArrayConstructor(Node* node);
+ Reduction ReduceNumberConstructor(Node* node);
+ Reduction ReduceFunctionPrototypeApply(Node* node);
+ Reduction ReduceFunctionPrototypeCall(Node* node);
+ Reduction ReduceJSCallConstruct(Node* node);
+ Reduction ReduceJSCallFunction(Node* node);
+
+ MaybeHandle<Context> GetNativeContext(Node* node);
+
+ Graph* graph() const;
+ Flags flags() const { return flags_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
+ MaybeHandle<Context> native_context() const { return native_context_; }
+ CommonOperatorBuilder* common() const;
+ JSOperatorBuilder* javascript() const;
+
+ JSGraph* const jsgraph_;
+ Flags const flags_;
+ MaybeHandle<Context> const native_context_;
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(JSCallReducer::Flags)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_CALL_REDUCER_H_
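
Being an ordinary Reducer, the class slots directly into a GraphReducer pass.
A minimal wiring sketch, assuming a JSGraph, zone and native context are
already in hand (the surrounding pipeline setup is illustrative):

    GraphReducer graph_reducer(zone, jsgraph->graph());
    JSCallReducer call_reducer(jsgraph, JSCallReducer::kDeoptimizationEnabled,
                               native_context);
    graph_reducer.AddReducer(&call_reducer);
    graph_reducer.ReduceGraph();
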
diff --git a/src/compiler/js-context-relaxation.cc b/src/compiler/js-context-relaxation.cc
new file mode 100644
index 0000000..0ca3c0c
--- /dev/null
+++ b/src/compiler/js-context-relaxation.cc
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/frame-states.h"
+#include "src/compiler/js-context-relaxation.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction JSContextRelaxation::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCallFunction:
+ case IrOpcode::kJSToNumber: {
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* outer_frame = frame_state;
+ Node* original_context = NodeProperties::GetContextInput(node);
+ Node* candidate_new_context = original_context;
+ do {
+ FrameStateInfo frame_state_info(
+ OpParameter<FrameStateInfo>(outer_frame->op()));
+ const FrameStateFunctionInfo* function_info =
+ frame_state_info.function_info();
+ if (function_info == nullptr ||
+ (function_info->context_calling_mode() ==
+ CALL_CHANGES_NATIVE_CONTEXT)) {
+ break;
+ }
+ candidate_new_context = outer_frame->InputAt(kFrameStateContextInput);
+ outer_frame = outer_frame->InputAt(kFrameStateOuterStateInput);
+ } while (outer_frame->opcode() == IrOpcode::kFrameState);
+
+ while (true) {
+ switch (candidate_new_context->opcode()) {
+ case IrOpcode::kParameter:
+ case IrOpcode::kJSCreateModuleContext:
+ case IrOpcode::kJSCreateScriptContext:
+ if (candidate_new_context != original_context) {
+ NodeProperties::ReplaceContextInput(node, candidate_new_context);
+ return Changed(node);
+ } else {
+ return NoChange();
+ }
+ case IrOpcode::kJSCreateCatchContext:
+ case IrOpcode::kJSCreateWithContext:
+ case IrOpcode::kJSCreateBlockContext:
+ candidate_new_context =
+ NodeProperties::GetContextInput(candidate_new_context);
+ break;
+ default:
+ return NoChange();
+ }
+ }
+ }
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/js-context-relaxation.h b/src/compiler/js-context-relaxation.h
new file mode 100644
index 0000000..4320e92
--- /dev/null
+++ b/src/compiler/js-context-relaxation.h
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_CONTEXT_RELAXATION_H_
+#define V8_COMPILER_JS_CONTEXT_RELAXATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Ensures that operations that only need to access the native context use the
+// outer-most context rather than the specific context given by the AST graph
+// builder. This makes it possible to use these operations with context
+// specialization (e.g. for generating stubs) without forcing inner contexts
+// to be embedded in generated code, which would cause leaks and potentially
+// use the wrong native context (stubs are shared between native contexts).
+class JSContextRelaxation final : public Reducer {
+ public:
+ JSContextRelaxation() {}
+ ~JSContextRelaxation() final {}
+
+ Reduction Reduce(Node* node) final;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_CONTEXT_RELAXATION_H_
diff --git a/src/compiler/js-context-specialization.cc b/src/compiler/js-context-specialization.cc
index a700b47..4d9d1d9 100644
--- a/src/compiler/js-context-specialization.cc
+++ b/src/compiler/js-context-specialization.cc
@@ -4,48 +4,51 @@
#include "src/compiler/js-context-specialization.h"
-#include "src/compiler.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/contexts.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
-Reduction JSContextSpecializer::Reduce(Node* node) {
- if (node == context_) {
- Node* constant = jsgraph_->Constant(info_->context());
- NodeProperties::ReplaceWithValue(node, constant);
- return Replace(constant);
- }
- if (node->opcode() == IrOpcode::kJSLoadContext) {
- return ReduceJSLoadContext(node);
- }
- if (node->opcode() == IrOpcode::kJSStoreContext) {
- return ReduceJSStoreContext(node);
+Reduction JSContextSpecialization::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSLoadContext:
+ return ReduceJSLoadContext(node);
+ case IrOpcode::kJSStoreContext:
+ return ReduceJSStoreContext(node);
+ default:
+ break;
}
return NoChange();
}
-Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
+MaybeHandle<Context> JSContextSpecialization::GetSpecializationContext(
+ Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadContext ||
+ node->opcode() == IrOpcode::kJSStoreContext);
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ return NodeProperties::GetSpecializationContext(object, context());
+}
+
+
+Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
- HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
- // If the context is not constant, no reduction can occur.
- if (!m.HasValue()) {
- return NoChange();
- }
-
- const ContextAccess& access = ContextAccessOf(node->op());
+ // Get the specialization context from the node.
+ Handle<Context> context;
+ if (!GetSpecializationContext(node).ToHandle(&context)) return NoChange();
// Find the right parent context.
- Context* context = *m.Value().handle();
+ const ContextAccess& access = ContextAccessOf(node->op());
for (size_t i = access.depth(); i > 0; --i) {
- context = context->previous();
+ context = handle(context->previous(), isolate());
}
// If the access itself is mutable, only fold-in the parent.
@@ -56,13 +59,12 @@
}
const Operator* op = jsgraph_->javascript()->LoadContext(
0, access.index(), access.immutable());
- node->set_op(op);
- Handle<Object> context_handle = Handle<Object>(context, info_->isolate());
- node->ReplaceInput(0, jsgraph_->Constant(context_handle));
+ node->ReplaceInput(0, jsgraph_->Constant(context));
+ NodeProperties::ChangeOp(node, op);
return Changed(node);
}
- Handle<Object> value = Handle<Object>(
- context->get(static_cast<int>(access.index())), info_->isolate());
+ Handle<Object> value =
+ handle(context->get(static_cast<int>(access.index())), isolate());
// Even though the context slot is immutable, the context might have escaped
// before the function to which it belongs has initialized the slot.
@@ -76,41 +78,44 @@
// TODO(titzer): record the specialization for sharing code across multiple
// contexts that have the same value in the corresponding context slot.
Node* constant = jsgraph_->Constant(value);
- NodeProperties::ReplaceWithValue(node, constant);
+ ReplaceWithValue(node, constant);
return Replace(constant);
}
-Reduction JSContextSpecializer::ReduceJSStoreContext(Node* node) {
+Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
- HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
- // If the context is not constant, no reduction can occur.
- if (!m.HasValue()) {
- return NoChange();
- }
-
- const ContextAccess& access = ContextAccessOf(node->op());
+ // Get the specialization context from the node.
+ Handle<Context> context;
+ if (!GetSpecializationContext(node).ToHandle(&context)) return NoChange();
// The access does not have to look up a parent, nothing to fold.
+ const ContextAccess& access = ContextAccessOf(node->op());
if (access.depth() == 0) {
return NoChange();
}
// Find the right parent context.
- Context* context = *m.Value().handle();
for (size_t i = access.depth(); i > 0; --i) {
- context = context->previous();
+ context = handle(context->previous(), isolate());
}
- const Operator* op = jsgraph_->javascript()->StoreContext(0, access.index());
- node->set_op(op);
- Handle<Object> new_context_handle = Handle<Object>(context, info_->isolate());
- node->ReplaceInput(0, jsgraph_->Constant(new_context_handle));
-
+ node->ReplaceInput(0, jsgraph_->Constant(context));
+ NodeProperties::ChangeOp(node, javascript()->StoreContext(0, access.index()));
return Changed(node);
}
+
+Isolate* JSContextSpecialization::isolate() const {
+ return jsgraph()->isolate();
+}
+
+
+JSOperatorBuilder* JSContextSpecialization::javascript() const {
+ return jsgraph()->javascript();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
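
Both reductions hinge on the same compile-time walk: once the specialization
context is a heap constant, access.depth() hops along previous() links can be
resolved immediately instead of emitting loads. A minimal model of the walk
(the handle(...)/isolate() bookkeeping above is elided; Context is used as a
raw stand-in):

    Context* WalkToParent(Context* context, size_t depth) {
      for (size_t i = depth; i > 0; --i) context = context->previous();
      return context;
    }
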
diff --git a/src/compiler/js-context-specialization.h b/src/compiler/js-context-specialization.h
index 298d3a3..ef784fc 100644
--- a/src/compiler/js-context-specialization.h
+++ b/src/compiler/js-context-specialization.h
@@ -6,34 +6,46 @@
#define V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-graph.h"
-#include "src/contexts.h"
-#include "src/v8.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
+class JSGraph;
+class JSOperatorBuilder;
+
+
// Specializes a given JSGraph to a given context, potentially constant folding
// some {LoadContext} nodes or strength reducing some {StoreContext} nodes.
-class JSContextSpecializer : public Reducer {
+class JSContextSpecialization final : public AdvancedReducer {
public:
- JSContextSpecializer(CompilationInfo* info, JSGraph* jsgraph, Node* context)
- : info_(info), jsgraph_(jsgraph), context_(context) {}
+ JSContextSpecialization(Editor* editor, JSGraph* jsgraph,
+ MaybeHandle<Context> context)
+ : AdvancedReducer(editor), jsgraph_(jsgraph), context_(context) {}
- Reduction Reduce(Node* node) OVERRIDE;
+ Reduction Reduce(Node* node) final;
- // Visible for unit testing.
+ private:
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
- private:
- CompilationInfo* info_;
- JSGraph* jsgraph_;
- Node* context_;
+ // Returns the {Context} to specialize {node} to (if any).
+ MaybeHandle<Context> GetSpecializationContext(Node* node);
+
+ Isolate* isolate() const;
+ JSOperatorBuilder* javascript() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ MaybeHandle<Context> context() const { return context_; }
+
+ JSGraph* const jsgraph_;
+ MaybeHandle<Context> context_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSContextSpecialization);
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
diff --git a/src/compiler/js-frame-specialization.cc b/src/compiler/js-frame-specialization.cc
new file mode 100644
index 0000000..769d615
--- /dev/null
+++ b/src/compiler/js-frame-specialization.cc
@@ -0,0 +1,75 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-frame-specialization.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/frames-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction JSFrameSpecialization::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kOsrValue:
+ return ReduceOsrValue(node);
+ case IrOpcode::kParameter:
+ return ReduceParameter(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
+ DCHECK_EQ(IrOpcode::kOsrValue, node->opcode());
+ Handle<Object> value;
+ int const index = OpParameter<int>(node);
+ int const parameters_count = frame()->ComputeParametersCount() + 1;
+ if (index == Linkage::kOsrContextSpillSlotIndex) {
+ value = handle(frame()->context(), isolate());
+ } else if (index >= parameters_count) {
+ value = handle(frame()->GetExpression(index - parameters_count), isolate());
+ } else {
+ // The OsrValue index 0 is the receiver.
+ value =
+ handle(index ? frame()->GetParameter(index - 1) : frame()->receiver(),
+ isolate());
+ }
+ return Replace(jsgraph()->Constant(value));
+}
+
+
+Reduction JSFrameSpecialization::ReduceParameter(Node* node) {
+ DCHECK_EQ(IrOpcode::kParameter, node->opcode());
+ Handle<Object> value;
+ int const index = ParameterIndexOf(node->op());
+ int const parameters_count = frame()->ComputeParametersCount() + 1;
+ if (index == Linkage::kJSCallClosureParamIndex) {
+ // The Parameter index references the closure.
+ value = handle(frame()->function(), isolate());
+ } else if (index == Linkage::GetJSCallArgCountParamIndex(parameters_count)) {
+ // The Parameter index references the parameter count.
+ value = handle(Smi::FromInt(parameters_count - 1), isolate());
+ } else if (index == Linkage::GetJSCallContextParamIndex(parameters_count)) {
+ // The Parameter index references the context.
+ value = handle(frame()->context(), isolate());
+ } else {
+ // The Parameter index 0 is the receiver.
+ value =
+ handle(index ? frame()->GetParameter(index - 1) : frame()->receiver(),
+ isolate());
+ }
+ return Replace(jsgraph()->Constant(value));
+}
+
+
+Isolate* JSFrameSpecialization::isolate() const { return jsgraph()->isolate(); }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
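
Both reductions replace frame-referencing nodes with the concrete values of
one specific unoptimized activation, so the specialized code is only valid
for that frame. A compact model of the OsrValue index decoding above, where
kContextSpillSlot stands in for Linkage::kOsrContextSpillSlotIndex:

    enum class OsrSlot { kContext, kReceiver, kParameter, kExpression };

    OsrSlot DecodeOsrIndex(int index, int parameters_count,
                           int kContextSpillSlot) {
      if (index == kContextSpillSlot) return OsrSlot::kContext;
      if (index >= parameters_count) return OsrSlot::kExpression;
      return index ? OsrSlot::kParameter : OsrSlot::kReceiver;
    }
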
diff --git a/src/compiler/js-frame-specialization.h b/src/compiler/js-frame-specialization.h
new file mode 100644
index 0000000..90b3ca5
--- /dev/null
+++ b/src/compiler/js-frame-specialization.h
@@ -0,0 +1,48 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_FRAME_SPECIALIZATION_H_
+#define V8_COMPILER_JS_FRAME_SPECIALIZATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class JavaScriptFrame;
+
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+
+
+class JSFrameSpecialization final : public Reducer {
+ public:
+ JSFrameSpecialization(JavaScriptFrame const* frame, JSGraph* jsgraph)
+ : frame_(frame), jsgraph_(jsgraph) {}
+ ~JSFrameSpecialization() final {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceOsrValue(Node* node);
+ Reduction ReduceParameter(Node* node);
+
+ Isolate* isolate() const;
+ JavaScriptFrame const* frame() const { return frame_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+
+ JavaScriptFrame const* const frame_;
+ JSGraph* const jsgraph_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSFrameSpecialization);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_FRAME_SPECIALIZATION_H_
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 4886442..15ce908 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -5,32 +5,35 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/unique.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
namespace v8 {
namespace internal {
namespace compiler {
-JSGenericLowering::JSGenericLowering(CompilationInfo* info, JSGraph* jsgraph)
- : info_(info),
- jsgraph_(jsgraph),
- linkage_(new (jsgraph->zone()) Linkage(jsgraph->zone(), info)) {}
-
-
-void JSGenericLowering::PatchOperator(Node* node, const Operator* op) {
- node->set_op(op);
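+// Drops all but the first frame state input of {node} and returns
+// kNeedsFrameState if one remains; lowered calls take at most one frame
+// state.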
+static CallDescriptor::Flags AdjustFrameStatesForCall(Node* node) {
+ int count = OperatorProperties::GetFrameStateInputCount(node->op());
+ if (count > 1) {
+ int index = NodeProperties::FirstFrameStateIndex(node) + 1;
+ do {
+ node->RemoveInput(index);
+ } while (--count > 1);
+ }
+ return count > 0 ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
}
-void JSGenericLowering::PatchInsertInput(Node* node, int index, Node* input) {
- node->InsertInput(zone(), index, input);
-}
+JSGenericLowering::JSGenericLowering(bool is_typing_enabled, JSGraph* jsgraph)
+ : is_typing_enabled_(is_typing_enabled), jsgraph_(jsgraph) {}
+
+
+JSGenericLowering::~JSGenericLowering() {}
Reduction JSGenericLowering::Reduce(Node* node) {
@@ -45,11 +48,11 @@
// TODO(mstarzinger): If typing is enabled then simplified lowering will
// have inserted the correct ChangeBoolToBit, otherwise we need to perform
// poor-man's representation inference here and insert manual change.
- if (!info()->is_typing_enabled()) {
- Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
+ if (!is_typing_enabled_) {
+ Node* condition = node->InputAt(0);
+ Node* test = graph()->NewNode(machine()->WordEqual(), condition,
jsgraph()->TrueConstant());
node->ReplaceInput(0, test);
- break;
}
// Fall-through.
default:
@@ -60,10 +63,15 @@
}
-#define REPLACE_BINARY_OP_IC_CALL(op, token) \
- void JSGenericLowering::Lower##op(Node* node) { \
- ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token), \
- CallDescriptor::kPatchableCallSiteWithNop); \
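+// Lowers a (language-mode dependent) binary operation to a BinaryOpIC stub
+// call, dropping any extra frame states first.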
+#define REPLACE_BINARY_OP_IC_CALL(Op, token) \
+ void JSGenericLowering::Lower##Op(Node* node) { \
+ BinaryOperationParameters const& p = \
+ BinaryOperationParametersOf(node->op()); \
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
+ ReplaceWithStubCall(node, \
+ CodeFactory::BinaryOpIC(isolate(), token, \
+ strength(p.language_mode())), \
+ CallDescriptor::kPatchableCallSiteWithNop | flags); \
}
REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
REPLACE_BINARY_OP_IC_CALL(JSBitwiseXor, Token::BIT_XOR)
@@ -79,123 +87,136 @@
#undef REPLACE_BINARY_OP_IC_CALL
-#define REPLACE_COMPARE_IC_CALL(op, token) \
- void JSGenericLowering::Lower##op(Node* node) { \
- ReplaceWithCompareIC(node, token); \
+// These ops are not language-mode dependent; we arbitrarily pass
+// Strength::WEAK here.
+#define REPLACE_COMPARE_IC_CALL(op, token) \
+ void JSGenericLowering::Lower##op(Node* node) { \
+ ReplaceWithCompareIC(node, token, Strength::WEAK); \
}
REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ)
REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE)
REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT)
REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT)
-REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT)
-REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT)
-REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE)
-REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE)
#undef REPLACE_COMPARE_IC_CALL
+#define REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(op, token) \
+ void JSGenericLowering::Lower##op(Node* node) { \
+ ReplaceWithCompareIC(node, token, \
+ strength(OpParameter<LanguageMode>(node))); \
+ }
+REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSLessThan, Token::LT)
+REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSGreaterThan, Token::GT)
+REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSLessThanOrEqual, Token::LTE)
+REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSGreaterThanOrEqual, Token::GTE)
+#undef REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE
+
+
#define REPLACE_RUNTIME_CALL(op, fun) \
void JSGenericLowering::Lower##op(Node* node) { \
ReplaceWithRuntimeCall(node, fun); \
}
-REPLACE_RUNTIME_CALL(JSTypeOf, Runtime::kTypeof)
-REPLACE_RUNTIME_CALL(JSCreate, Runtime::kAbort)
REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
-REPLACE_RUNTIME_CALL(JSCreateCatchContext, Runtime::kPushCatchContext)
REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
-REPLACE_RUNTIME_CALL(JSCreateBlockContext, Runtime::kPushBlockContext)
REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
-REPLACE_RUNTIME_CALL(JSCreateScriptContext, Runtime::kAbort)
+REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
#undef REPLACE_RUNTIME_CALL
-#define REPLACE_UNIMPLEMENTED(op) \
- void JSGenericLowering::Lower##op(Node* node) { UNIMPLEMENTED(); }
-REPLACE_UNIMPLEMENTED(JSToName)
-REPLACE_UNIMPLEMENTED(JSYield)
-REPLACE_UNIMPLEMENTED(JSDebugger)
-#undef REPLACE_UNIMPLEMENTED
-
-
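+// Returns kNeedsFrameState if {node} still takes a frame state input.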
static CallDescriptor::Flags FlagsForNode(Node* node) {
CallDescriptor::Flags result = CallDescriptor::kNoFlags;
- if (OperatorProperties::HasFrameStateInput(node->op())) {
+ if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
result |= CallDescriptor::kNeedsFrameState;
}
return result;
}
-void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
- Callable callable = CodeFactory::CompareIC(isolate(), token);
- bool has_frame_state = OperatorProperties::HasFrameStateInput(node->op());
- CallDescriptor* desc_compare = linkage()->GetStubCallDescriptor(
- callable.descriptor(), 0,
- CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node));
+void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
+ Strength str) {
+ Callable callable = CodeFactory::CompareIC(isolate(), token, str);
+
+ // Create a new call node asking a CompareIC for help.
NodeVector inputs(zone());
inputs.reserve(node->InputCount() + 1);
inputs.push_back(jsgraph()->HeapConstant(callable.code()));
inputs.push_back(NodeProperties::GetValueInput(node, 0));
inputs.push_back(NodeProperties::GetValueInput(node, 1));
inputs.push_back(NodeProperties::GetContextInput(node));
- if (node->op()->HasProperty(Operator::kPure)) {
- // A pure (strict) comparison doesn't have an effect, control or frame
- // state. But for the graph, we need to add control and effect inputs.
- DCHECK(!has_frame_state);
- inputs.push_back(graph()->start());
- inputs.push_back(graph()->start());
- } else {
- DCHECK(has_frame_state == FLAG_turbo_deoptimization);
- if (FLAG_turbo_deoptimization) {
- inputs.push_back(NodeProperties::GetFrameStateInput(node));
- }
- inputs.push_back(NodeProperties::GetEffectInput(node));
- inputs.push_back(NodeProperties::GetControlInput(node));
+  // Some comparisons (e.g. StrictEqual) don't have effect, control, or frame
+  // state inputs, so handle those cases here.
+ if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+ inputs.push_back(NodeProperties::GetFrameStateInput(node, 0));
}
+ Node* effect = (node->op()->EffectInputCount() > 0)
+ ? NodeProperties::GetEffectInput(node)
+ : graph()->start();
+ inputs.push_back(effect);
+ Node* control = (node->op()->ControlInputCount() > 0)
+ ? NodeProperties::GetControlInput(node)
+ : graph()->start();
+ inputs.push_back(control);
+ CallDescriptor* desc_compare = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0,
+ CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node),
+ Operator::kNoProperties, MachineType::IntPtr());
Node* compare =
graph()->NewNode(common()->Call(desc_compare),
static_cast<int>(inputs.size()), &inputs.front());
- node->ReplaceInput(0, compare);
- node->ReplaceInput(1, jsgraph()->SmiConstant(token));
-
- if (has_frame_state) {
- // Remove the frame state from inputs.
- node->RemoveInput(NodeProperties::FirstFrameStateIndex(node));
+ // Decide how the return value from the above CompareIC can be converted into
+ // a JavaScript boolean oddball depending on the given token.
+ Node* false_value = jsgraph()->FalseConstant();
+ Node* true_value = jsgraph()->TrueConstant();
+ const Operator* op = nullptr;
+ switch (token) {
+ case Token::EQ: // a == 0
+ case Token::EQ_STRICT:
+ op = machine()->WordEqual();
+ break;
+ case Token::NE: // a != 0 becomes !(a == 0)
+ case Token::NE_STRICT:
+ op = machine()->WordEqual();
+ std::swap(true_value, false_value);
+ break;
+ case Token::LT: // a < 0
+ op = machine()->IntLessThan();
+ break;
+ case Token::GT: // a > 0 becomes !(a <= 0)
+ op = machine()->IntLessThanOrEqual();
+ std::swap(true_value, false_value);
+ break;
+ case Token::LTE: // a <= 0
+ op = machine()->IntLessThanOrEqual();
+ break;
+ case Token::GTE: // a >= 0 becomes !(a < 0)
+ op = machine()->IntLessThan();
+ std::swap(true_value, false_value);
+ break;
+ default:
+ UNREACHABLE();
}
+ Node* booleanize = graph()->NewNode(op, compare, jsgraph()->ZeroConstant());
- ReplaceWithRuntimeCall(node, Runtime::kBooleanize);
+ // Finally patch the original node to select a boolean.
+ NodeProperties::ReplaceUses(node, node, compare, compare, compare);
+ node->TrimInputCount(3);
+ node->ReplaceInput(0, booleanize);
+ node->ReplaceInput(1, true_value);
+ node->ReplaceInput(2, false_value);
+ NodeProperties::ChangeOp(node,
+ common()->Select(MachineRepresentation::kTagged));
}
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
CallDescriptor::Flags flags) {
Operator::Properties properties = node->op()->properties();
- CallDescriptor* desc = linkage()->GetStubCallDescriptor(
- callable.descriptor(), 0, flags | FlagsForNode(node), properties);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0, flags, properties);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- PatchInsertInput(node, 0, stub_code);
- PatchOperator(node, common()->Call(desc));
-}
-
-
-void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
- Builtins::JavaScript id,
- int nargs) {
- Operator::Properties properties = node->op()->properties();
- Callable callable =
- CodeFactory::CallFunction(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
- CallDescriptor* desc = linkage()->GetStubCallDescriptor(
- callable.descriptor(), nargs, FlagsForNode(node), properties);
- // TODO(mstarzinger): Accessing the builtins object this way prevents sharing
- // of code across native contexts. Fix this by loading from given context.
- Handle<JSFunction> function(
- JSFunction::cast(info()->context()->builtins()->javascript_builtin(id)));
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* function_node = jsgraph()->HeapConstant(function);
- PatchInsertInput(node, 0, stub_code);
- PatchInsertInput(node, 1, function_node);
- PatchOperator(node, common()->Call(desc));
+ node->InsertInput(zone(), 0, stub_code);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
@@ -205,109 +226,172 @@
Operator::Properties properties = node->op()->properties();
const Runtime::Function* fun = Runtime::FunctionForId(f);
int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
- CallDescriptor* desc =
- linkage()->GetRuntimeCallDescriptor(f, nargs, properties);
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), f, nargs, properties, CallDescriptor::kNeedsFrameState);
Node* ref = jsgraph()->ExternalConstant(ExternalReference(f, isolate()));
Node* arity = jsgraph()->Int32Constant(nargs);
- PatchInsertInput(node, 0, jsgraph()->CEntryStubConstant(fun->result_size));
- PatchInsertInput(node, nargs + 1, ref);
- PatchInsertInput(node, nargs + 2, arity);
- PatchOperator(node, common()->Call(desc));
+ node->InsertInput(zone(), 0, jsgraph()->CEntryStubConstant(fun->result_size));
+ node->InsertInput(zone(), nargs + 1, ref);
+ node->InsertInput(zone(), nargs + 2, arity);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
-void JSGenericLowering::LowerJSUnaryNot(Node* node) {
- Callable callable = CodeFactory::ToBoolean(
- isolate(), ToBooleanStub::RESULT_AS_INVERSE_ODDBALL);
- ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+void JSGenericLowering::LowerJSTypeOf(Node* node) {
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::Typeof(isolate());
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSToBoolean(Node* node) {
- Callable callable =
- CodeFactory::ToBoolean(isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
- ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::ToBoolean(isolate());
+ ReplaceWithStubCall(node, callable,
+ CallDescriptor::kPatchableCallSite | flags);
}
void JSGenericLowering::LowerJSToNumber(Node* node) {
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
Callable callable = CodeFactory::ToNumber(isolate());
- ReplaceWithStubCall(node, callable, FlagsForNode(node));
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSToString(Node* node) {
- ReplaceWithBuiltinCall(node, Builtins::TO_STRING, 1);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::ToString(isolate());
+ ReplaceWithStubCall(node, callable, flags);
+}
+
+
+void JSGenericLowering::LowerJSToName(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kToName);
}
void JSGenericLowering::LowerJSToObject(Node* node) {
- ReplaceWithBuiltinCall(node, Builtins::TO_OBJECT, 1);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::ToObject(isolate());
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
- const LoadPropertyParameters& p = LoadPropertyParametersOf(node->op());
- Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
- if (FLAG_vector_ics) {
- PatchInsertInput(node, 2, jsgraph()->SmiConstant(p.feedback().index()));
- PatchInsertInput(node, 3, jsgraph()->HeapConstant(p.feedback().vector()));
- }
- ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ const PropertyAccess& p = PropertyAccessOf(node->op());
+ Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), p.language_mode(), UNINITIALIZED);
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
- const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
- Callable callable =
- CodeFactory::LoadICInOptimizedCode(isolate(), p.contextual_mode());
- PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name()));
- if (FLAG_vector_ics) {
- PatchInsertInput(node, 2, jsgraph()->SmiConstant(p.feedback().index()));
- PatchInsertInput(node, 3, jsgraph()->HeapConstant(p.feedback().vector()));
- }
- ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ NamedAccess const& p = NamedAccessOf(node->op());
+ Callable callable = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_INSIDE_TYPEOF, p.language_mode(), UNINITIALIZED);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable, flags);
+}
+
+
+void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
+ Callable callable = CodeFactory::LoadICInOptimizedCode(
+ isolate(), p.typeof_mode(), SLOPPY, UNINITIALIZED);
+ // Load global object from the context.
+ Node* native_context =
+ graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
+ jsgraph()->IntPtrConstant(
+ Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
+ effect, graph()->start());
+ Node* global = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), native_context,
+ jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
+ effect, graph()->start());
+ node->InsertInput(zone(), 0, global);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
- StrictMode strict_mode = OpParameter<StrictMode>(node);
- Callable callable = CodeFactory::KeyedStoreIC(isolate(), strict_mode);
- ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ PropertyAccess const& p = PropertyAccessOf(node->op());
+ LanguageMode language_mode = p.language_mode();
+ Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), language_mode, UNINITIALIZED);
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable,
+ CallDescriptor::kPatchableCallSite | flags);
}
void JSGenericLowering::LowerJSStoreNamed(Node* node) {
- const StoreNamedParameters& p = StoreNamedParametersOf(node->op());
- Callable callable = CodeFactory::StoreIC(isolate(), p.strict_mode());
- PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name()));
- ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ NamedAccess const& p = NamedAccessOf(node->op());
+ Callable callable = CodeFactory::StoreICInOptimizedCode(
+ isolate(), p.language_mode(), UNINITIALIZED);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable,
+ CallDescriptor::kPatchableCallSite | flags);
+}
+
+
+void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
+ Callable callable = CodeFactory::StoreICInOptimizedCode(
+ isolate(), p.language_mode(), UNINITIALIZED);
+ // Load global object from the context.
+ Node* native_context =
+ graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
+ jsgraph()->IntPtrConstant(
+ Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
+ effect, graph()->start());
+ Node* global = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), native_context,
+ jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
+ effect, graph()->start());
+ node->InsertInput(zone(), 0, global);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable,
+ CallDescriptor::kPatchableCallSite | flags);
}
void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
- StrictMode strict_mode = OpParameter<StrictMode>(node);
- PatchInsertInput(node, 2, jsgraph()->SmiConstant(strict_mode));
- ReplaceWithBuiltinCall(node, Builtins::DELETE, 3);
+ LanguageMode language_mode = OpParameter<LanguageMode>(node);
+ ReplaceWithRuntimeCall(node, is_strict(language_mode)
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy);
}
void JSGenericLowering::LowerJSHasProperty(Node* node) {
- ReplaceWithBuiltinCall(node, Builtins::IN, 2);
+ ReplaceWithRuntimeCall(node, Runtime::kHasProperty);
}
void JSGenericLowering::LowerJSInstanceOf(Node* node) {
- InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
- InstanceofStub::kReturnTrueFalseObject |
- InstanceofStub::kArgsInRegisters);
- InstanceofStub stub(isolate(), flags);
- CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
- CallDescriptor* desc =
- linkage()->GetStubCallDescriptor(d, 0, FlagsForNode(node));
- Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
- PatchInsertInput(node, 0, stub_code);
- PatchOperator(node, common()->Call(desc));
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::InstanceOf(isolate());
+ ReplaceWithStubCall(node, callable, flags);
}
@@ -315,7 +399,7 @@
const ContextAccess& access = ContextAccessOf(node->op());
for (size_t i = 0; i < access.depth(); ++i) {
node->ReplaceInput(
- 0, graph()->NewNode(machine()->Load(kMachAnyTagged),
+ 0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
NodeProperties::GetValueInput(node, 0),
jsgraph()->Int32Constant(
Context::SlotOffset(Context::PREVIOUS_INDEX)),
@@ -325,7 +409,7 @@
node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
static_cast<int>(access.index()))));
node->AppendInput(zone(), graph()->start());
- PatchOperator(node, machine()->Load(kMachAnyTagged));
+ NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
}
@@ -333,7 +417,7 @@
const ContextAccess& access = ContextAccessOf(node->op());
for (size_t i = 0; i < access.depth(); ++i) {
node->ReplaceInput(
- 0, graph()->NewNode(machine()->Load(kMachAnyTagged),
+ 0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
NodeProperties::GetValueInput(node, 0),
jsgraph()->Int32Constant(
Context::SlotOffset(Context::PREVIOUS_INDEX)),
@@ -343,86 +427,454 @@
node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
static_cast<int>(access.index()))));
- PatchOperator(node, machine()->Store(StoreRepresentation(kMachAnyTagged,
- kFullWriteBarrier)));
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(MachineRepresentation::kTagged,
+ kFullWriteBarrier)));
+}
+
+
+void JSGenericLowering::LowerJSLoadDynamic(Node* node) {
+ const DynamicAccess& access = DynamicAccessOf(node->op());
+ Runtime::FunctionId function_id =
+ (access.typeof_mode() == NOT_INSIDE_TYPEOF)
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotNoReferenceError;
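+  // The lookup runtime call returns a (value, receiver) pair, so route value
+  // uses of {node} through a fresh Projection(0) of the lowered call.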
+ Node* projection = graph()->NewNode(common()->Projection(0), node);
+ NodeProperties::ReplaceUses(node, projection, node, node, node);
+ node->RemoveInput(NodeProperties::FirstValueIndex(node));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(access.name()));
+ ReplaceWithRuntimeCall(node, function_id);
+ projection->ReplaceInput(0, node);
+}
+
+
+void JSGenericLowering::LowerJSCreate(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kNewObject);
+}
+
+
+void JSGenericLowering::LowerJSCreateArguments(Node* node) {
+ const CreateArgumentsParameters& p = CreateArgumentsParametersOf(node->op());
+ switch (p.type()) {
+ case CreateArgumentsParameters::kMappedArguments:
+ ReplaceWithRuntimeCall(node, Runtime::kNewSloppyArguments_Generic);
+ break;
+ case CreateArgumentsParameters::kUnmappedArguments:
+ ReplaceWithRuntimeCall(node, Runtime::kNewStrictArguments_Generic);
+ break;
+ case CreateArgumentsParameters::kRestArray:
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.start_index()));
+ ReplaceWithRuntimeCall(node, Runtime::kNewRestArguments_Generic);
+ break;
+ }
+}
+
+
+void JSGenericLowering::LowerJSCreateArray(Node* node) {
+ CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ int const arity = static_cast<int>(p.arity());
+ Node* new_target = node->InputAt(1);
+  // TODO(turbofan): We embed the AllocationSite from the Operator at this
+  // point, which we should not do once we want to both consume the feedback
+  // and share the optimized code across native contexts, as the
+  // AllocationSite is associated with a single native context (it's stored in
+  // the type feedback vector after all). Once we go for cross-context code
+  // generation, we should somehow find a way to get to the allocation site
+  // for the actual native context at runtime.
+ Node* type_info = p.site().is_null() ? jsgraph()->UndefinedConstant()
+ : jsgraph()->HeapConstant(p.site());
+ node->RemoveInput(1);
+ node->InsertInput(zone(), 1 + arity, new_target);
+ node->InsertInput(zone(), 2 + arity, type_info);
+ ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
+}
+
+
+void JSGenericLowering::LowerJSCreateClosure(Node* node) {
+ CreateClosureParameters p = CreateClosureParametersOf(node->op());
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.shared_info()));
+ ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
+ ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
+}
+
+
+void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kCreateIterResultObject);
+}
+
+
+void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
+ node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
+ ReplaceWithRuntimeCall(node, Runtime::kCreateArrayLiteral);
+}
+
+
+void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
+ node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
+ ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
+}
+
+
+void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::FastCloneRegExp(isolate());
+ Node* literal_index = jsgraph()->SmiConstant(p.index());
+ Node* literal_flags = jsgraph()->SmiConstant(p.flags());
+ Node* pattern = jsgraph()->HeapConstant(p.constant());
+ node->InsertInput(graph()->zone(), 1, literal_index);
+ node->InsertInput(graph()->zone(), 2, pattern);
+ node->InsertInput(graph()->zone(), 3, literal_flags);
+ ReplaceWithStubCall(node, callable, flags);
+}
+
+
+void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
+ Handle<String> name = OpParameter<Handle<String>>(node);
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(name));
+ ReplaceWithRuntimeCall(node, Runtime::kPushCatchContext);
+}
+
+
+void JSGenericLowering::LowerJSCreateBlockContext(Node* node) {
+ Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info));
+ ReplaceWithRuntimeCall(node, Runtime::kPushBlockContext);
+}
+
+
+void JSGenericLowering::LowerJSCreateScriptContext(Node* node) {
+ Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
+ ReplaceWithRuntimeCall(node, Runtime::kNewScriptContext);
}
void JSGenericLowering::LowerJSCallConstruct(Node* node) {
- int arity = OpParameter<int>(node);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
- CallDescriptor* desc =
- linkage()->GetStubCallDescriptor(d, arity, FlagsForNode(node));
- Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
- Node* construct = NodeProperties::GetValueInput(node, 0);
- PatchInsertInput(node, 0, stub_code);
- PatchInsertInput(node, 1, jsgraph()->Int32Constant(arity - 1));
- PatchInsertInput(node, 2, construct);
- PatchInsertInput(node, 3, jsgraph()->UndefinedConstant());
- PatchOperator(node, common()->Call(desc));
-}
-
-
-bool JSGenericLowering::TryLowerDirectJSCall(Node* node) {
- // Lower to a direct call to a constant JSFunction if legal.
- const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
- int arg_count = static_cast<int>(p.arity() - 2);
-
- // Check the function is a constant and is really a JSFunction.
- HeapObjectMatcher<Object> function_const(node->InputAt(0));
- if (!function_const.HasValue()) return false; // not a constant.
- Handle<Object> func = function_const.Value().handle();
- if (!func->IsJSFunction()) return false; // not a function.
- Handle<JSFunction> function = Handle<JSFunction>::cast(func);
- if (arg_count != function->shared()->formal_parameter_count()) return false;
-
- // Check the receiver doesn't need to be wrapped.
- Node* receiver = node->InputAt(1);
- if (!NodeProperties::IsTyped(receiver)) return false;
- Type* ok_receiver = Type::Union(Type::Undefined(), Type::Receiver(), zone());
- if (!NodeProperties::GetBounds(receiver).upper->Is(ok_receiver)) return false;
-
- int index = NodeProperties::FirstContextIndex(node);
-
- // TODO(titzer): total hack to share function context constants.
- // Remove this when the JSGraph canonicalizes heap constants.
- Node* context = node->InputAt(index);
- HeapObjectMatcher<Context> context_const(context);
- if (!context_const.HasValue() ||
- *(context_const.Value().handle()) != function->context()) {
- context = jsgraph()->HeapConstant(Handle<Context>(function->context()));
- }
- node->ReplaceInput(index, context);
- CallDescriptor* desc = linkage()->GetJSCallDescriptor(
- 1 + arg_count, jsgraph()->zone(), FlagsForNode(node));
- PatchOperator(node, common()->Call(desc));
- return true;
+ CallConstructParameters const& p = CallConstructParametersOf(node->op());
+ int const arg_count = static_cast<int>(p.arity() - 2);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::Construct(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* new_target = node->InputAt(arg_count + 1);
+ Node* receiver = jsgraph()->UndefinedConstant();
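+  // Shuffle the inputs to match the Construct stub's expected layout:
+  // code, target, new.target, argument count, receiver, ...args.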
+ node->RemoveInput(arg_count + 1); // Drop new target.
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
void JSGenericLowering::LowerJSCallFunction(Node* node) {
- // Fast case: call function directly.
- if (TryLowerDirectJSCall(node)) return;
-
- // General case: CallFunctionStub.
- const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
- int arg_count = static_cast<int>(p.arity() - 2);
- CallFunctionStub stub(isolate(), arg_count, p.flags());
- CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
- CallDescriptor* desc = linkage()->GetStubCallDescriptor(
- d, static_cast<int>(p.arity() - 1), FlagsForNode(node));
- Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
- PatchInsertInput(node, 0, stub_code);
- PatchOperator(node, common()->Call(desc));
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ int const arg_count = static_cast<int>(p.arity() - 2);
+ ConvertReceiverMode const mode = p.convert_mode();
+ Callable callable = CodeFactory::Call(isolate(), mode);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ if (p.tail_call_mode() == TailCallMode::kAllow) {
+ flags |= CallDescriptor::kSupportsTailCalls;
+ }
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
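+  // The resulting inputs match the Call builtin's expected layout:
+  // code, target, argument count, receiver, ...args.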
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
void JSGenericLowering::LowerJSCallRuntime(Node* node) {
const CallRuntimeParameters& p = CallRuntimeParametersOf(node->op());
+ AdjustFrameStatesForCall(node);
ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
}
+
+void JSGenericLowering::LowerJSForInDone(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kForInDone);
+}
+
+
+void JSGenericLowering::LowerJSForInNext(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kForInNext);
+}
+
+
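+// Expands JSForInPrepare into an explicit subgraph computing the triple
+// (cache_type, cache_array, cache_length), which is consumed through
+// Projection uses; distinguishes the enum-cache fast path from the slow
+// FixedArray path.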
+void JSGenericLowering::LowerJSForInPrepare(Node* node) {
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+
+ // Get the set of properties to enumerate.
+ Runtime::Function const* function =
+ Runtime::FunctionForId(Runtime::kGetPropertyNamesFast);
+ CallDescriptor const* descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone(), function->function_id, 1, Operator::kNoProperties,
+ CallDescriptor::kNeedsFrameState);
+ Node* cache_type = effect = graph()->NewNode(
+ common()->Call(descriptor),
+ jsgraph()->CEntryStubConstant(function->result_size), object,
+ jsgraph()->ExternalConstant(function->function_id),
+ jsgraph()->Int32Constant(1), context, frame_state, effect, control);
+ control = graph()->NewNode(common()->IfSuccess(), cache_type);
+
+ Node* object_map = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), object,
+ jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
+ effect, control);
+ Node* cache_type_map = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), cache_type,
+ jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
+ effect, control);
+ Node* meta_map = jsgraph()->HeapConstant(isolate()->factory()->meta_map());
+
+ // If we got a map from the GetPropertyNamesFast runtime call, we can do a
+ // fast modification check. Otherwise, we got a fixed array, and we have to
+ // perform a slow check on every iteration.
+ Node* check0 =
+ graph()->NewNode(machine()->WordEqual(), cache_type_map, meta_map);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* cache_array_true0;
+ Node* cache_length_true0;
+ Node* cache_type_true0;
+ Node* etrue0;
+ {
+ // Enum cache case.
+ Node* cache_type_enum_length = etrue0 = graph()->NewNode(
+ machine()->Load(MachineType::Uint32()), cache_type,
+ jsgraph()->IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag),
+ effect, if_true0);
+ cache_type_enum_length =
+ graph()->NewNode(machine()->Word32And(), cache_type_enum_length,
+ jsgraph()->Uint32Constant(Map::EnumLengthBits::kMask));
+
+ Node* check1 =
+ graph()->NewNode(machine()->Word32Equal(), cache_type_enum_length,
+ jsgraph()->Int32Constant(0));
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* cache_array_true1;
+ Node* etrue1;
+ {
+ // No properties to enumerate.
+ cache_array_true1 =
+ jsgraph()->HeapConstant(isolate()->factory()->empty_fixed_array());
+ etrue1 = etrue0;
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* cache_array_false1;
+ Node* efalse1;
+ {
+ // Load the enumeration cache from the instance descriptors of {object}.
+ Node* object_map_descriptors = efalse1 = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), object_map,
+ jsgraph()->IntPtrConstant(Map::kDescriptorsOffset - kHeapObjectTag),
+ etrue0, if_false1);
+ Node* object_map_enum_cache = efalse1 = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), object_map_descriptors,
+ jsgraph()->IntPtrConstant(DescriptorArray::kEnumCacheOffset -
+ kHeapObjectTag),
+ efalse1, if_false1);
+ cache_array_false1 = efalse1 = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), object_map_enum_cache,
+ jsgraph()->IntPtrConstant(
+ DescriptorArray::kEnumCacheBridgeCacheOffset - kHeapObjectTag),
+ efalse1, if_false1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ etrue0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+ cache_array_true0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true1, cache_array_false1, if_true0);
+
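+    // Smi-tag the enum length by shifting it into the Smi value field.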
+ cache_length_true0 = graph()->NewNode(
+ machine()->WordShl(),
+ machine()->Is64()
+ ? graph()->NewNode(machine()->ChangeUint32ToUint64(),
+ cache_type_enum_length)
+ : cache_type_enum_length,
+ jsgraph()->Int32Constant(kSmiShiftSize + kSmiTagSize));
+ cache_type_true0 = cache_type;
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* cache_array_false0;
+ Node* cache_length_false0;
+ Node* cache_type_false0;
+ Node* efalse0;
+ {
+ // FixedArray case.
+ cache_type_false0 = jsgraph()->OneConstant(); // Smi means slow check
+ cache_array_false0 = cache_type;
+ cache_length_false0 = efalse0 = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), cache_array_false0,
+ jsgraph()->IntPtrConstant(FixedArray::kLengthOffset - kHeapObjectTag),
+ effect, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ Node* cache_array =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true0, cache_array_false0, control);
+ Node* cache_length =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_length_true0, cache_length_false0, control);
+ cache_type =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_type_true0, cache_type_false0, control);
+
+ for (auto edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else if (NodeProperties::IsControlEdge(edge)) {
+ Node* const use = edge.from();
+ if (use->opcode() == IrOpcode::kIfSuccess) {
+ use->ReplaceUses(control);
+ use->Kill();
+ } else if (use->opcode() == IrOpcode::kIfException) {
+ edge.UpdateTo(cache_type_true0);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ Node* const use = edge.from();
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ DCHECK_EQ(IrOpcode::kProjection, use->opcode());
+ switch (ProjectionIndexOf(use->op())) {
+ case 0:
+ use->ReplaceUses(cache_type);
+ break;
+ case 1:
+ use->ReplaceUses(cache_array);
+ break;
+ case 2:
+ use->ReplaceUses(cache_length);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ use->Kill();
+ }
+ }
+}
+
+
+void JSGenericLowering::LowerJSForInStep(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kForInStep);
+}
+
+
+void JSGenericLowering::LowerJSLoadMessage(Node* node) {
+ ExternalReference message_address =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ node->RemoveInput(NodeProperties::FirstContextIndex(node));
+ node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
+ node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
+ NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
+}
+
+
+void JSGenericLowering::LowerJSStoreMessage(Node* node) {
+ ExternalReference message_address =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ node->RemoveInput(NodeProperties::FirstContextIndex(node));
+ node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
+ node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
+ StoreRepresentation representation(MachineRepresentation::kTagged,
+ kNoWriteBarrier);
+ NodeProperties::ChangeOp(node, machine()->Store(representation));
+}
+
+
+void JSGenericLowering::LowerJSYield(Node* node) { UNIMPLEMENTED(); }
+
+
+void JSGenericLowering::LowerJSStackCheck(Node* node) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* limit = graph()->NewNode(
+ machine()->Load(MachineType::Pointer()),
+ jsgraph()->ExternalConstant(
+ ExternalReference::address_of_stack_limit(isolate())),
+ jsgraph()->IntPtrConstant(0), effect, control);
+ Node* pointer = graph()->NewNode(machine()->LoadStackPointer());
+
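+  // The stack grows downwards, so the stack check passes while limit < sp.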
+ Node* check = graph()->NewNode(machine()->UintLessThan(), limit, pointer);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ NodeProperties::ReplaceControlInput(node, if_false);
+ Node* efalse = node;
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
+
+  // Wire the new diamond into the graph; {node} can still throw.
+ NodeProperties::ReplaceUses(node, node, ephi, node, node);
+ NodeProperties::ReplaceEffectInput(ephi, efalse, 1);
+
+ // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
+ // the node and places it inside the diamond. Come up with a helper method!
+ for (Node* use : node->uses()) {
+ if (use->opcode() == IrOpcode::kIfSuccess) {
+ use->ReplaceUses(merge);
+ merge->ReplaceInput(1, use);
+ }
+ }
+
+ // Turn the stack check into a runtime call.
+ ReplaceWithRuntimeCall(node, Runtime::kStackGuard);
+}
+
+
+Zone* JSGenericLowering::zone() const { return graph()->zone(); }
+
+
+Isolate* JSGenericLowering::isolate() const { return jsgraph()->isolate(); }
+
+
+Graph* JSGenericLowering::graph() const { return jsgraph()->graph(); }
+
+
+CommonOperatorBuilder* JSGenericLowering::common() const {
+ return jsgraph()->common();
+}
+
+
+MachineOperatorBuilder* JSGenericLowering::machine() const {
+ return jsgraph()->machine();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/js-generic-lowering.h b/src/compiler/js-generic-lowering.h
index f626338..ffce912 100644
--- a/src/compiler/js-generic-lowering.h
+++ b/src/compiler/js-generic-lowering.h
@@ -5,11 +5,8 @@
#ifndef V8_COMPILER_JS_GENERIC_LOWERING_H_
#define V8_COMPILER_JS_GENERIC_LOWERING_H_
-#include "src/allocation.h"
#include "src/code-factory.h"
-#include "src/compiler/graph.h"
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/opcodes.h"
@@ -19,17 +16,18 @@
// Forward declarations.
class CommonOperatorBuilder;
+class JSGraph;
class MachineOperatorBuilder;
class Linkage;
// Lowers JS-level operators to runtime and IC calls in the "generic" case.
-class JSGenericLowering FINAL : public Reducer {
+class JSGenericLowering final : public Reducer {
public:
- JSGenericLowering(CompilationInfo* info, JSGraph* graph);
- ~JSGenericLowering() FINAL {}
+ JSGenericLowering(bool is_typing_enabled, JSGraph* jsgraph);
+ ~JSGenericLowering() final;
- Reduction Reduce(Node* node) FINAL;
+ Reduction Reduce(Node* node) final;
protected:
#define DECLARE_LOWER(x) void Lower##x(Node* node);
@@ -37,32 +35,21 @@
JS_OP_LIST(DECLARE_LOWER)
#undef DECLARE_LOWER
- // Helpers to patch existing nodes in the graph.
- void PatchOperator(Node* node, const Operator* new_op);
- void PatchInsertInput(Node* node, int index, Node* input);
-
// Helpers to replace existing nodes with a generic call.
- void ReplaceWithCompareIC(Node* node, Token::Value token);
+ void ReplaceWithCompareIC(Node* node, Token::Value token, Strength strength);
void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
- void ReplaceWithBuiltinCall(Node* node, Builtins::JavaScript id, int args);
void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
- // Helper for optimization of JSCallFunction.
- bool TryLowerDirectJSCall(Node* node);
-
- Zone* zone() const { return graph()->zone(); }
- Isolate* isolate() const { return zone()->isolate(); }
+ Zone* zone() const;
+ Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
- Graph* graph() const { return jsgraph()->graph(); }
- Linkage* linkage() const { return linkage_; }
- CompilationInfo* info() const { return info_; }
- CommonOperatorBuilder* common() const { return jsgraph()->common(); }
- MachineOperatorBuilder* machine() const { return jsgraph()->machine(); }
+ Graph* graph() const;
+ CommonOperatorBuilder* common() const;
+ MachineOperatorBuilder* machine() const;
private:
- CompilationInfo* info_;
- JSGraph* jsgraph_;
- Linkage* linkage_;
+ bool const is_typing_enabled_;
+ JSGraph* const jsgraph_;
};
} // namespace compiler
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
new file mode 100644
index 0000000..e6f01b3
--- /dev/null
+++ b/src/compiler/js-global-object-specialization.cc
@@ -0,0 +1,320 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-global-object-specialization.h"
+
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/type-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
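+// Result of a script context table lookup: the script context that holds the
+// slot, the slot index, and whether the binding is immutable.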
+struct JSGlobalObjectSpecialization::ScriptContextTableLookupResult {
+ Handle<Context> context;
+ bool immutable;
+ int index;
+};
+
+
+JSGlobalObjectSpecialization::JSGlobalObjectSpecialization(
+ Editor* editor, JSGraph* jsgraph, Flags flags,
+ MaybeHandle<Context> native_context, CompilationDependencies* dependencies)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ flags_(flags),
+ native_context_(native_context),
+ dependencies_(dependencies),
+ type_cache_(TypeCache::Get()) {}
+
+
+Reduction JSGlobalObjectSpecialization::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSLoadGlobal:
+ return ReduceJSLoadGlobal(node);
+ case IrOpcode::kJSStoreGlobal:
+ return ReduceJSStoreGlobal(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
+ Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Retrieve the global object from the given {node}.
+ Handle<JSGlobalObject> global_object;
+ if (!GetGlobalObject(node).ToHandle(&global_object)) return NoChange();
+
+  // Try to look up the name in the script context table first (lexical scoping).
+ ScriptContextTableLookupResult result;
+ if (LookupInScriptContextTable(global_object, name, &result)) {
+ if (result.context->is_the_hole(result.index)) return NoChange();
+ Node* context = jsgraph()->HeapConstant(result.context);
+ Node* value = effect = graph()->NewNode(
+ javascript()->LoadContext(0, result.index, result.immutable), context,
+ context, effect);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+
+  // Look it up on the global object instead. We only deal with own data
+  // properties of the global object here (represented as PropertyCells).
+ LookupIterator it(global_object, name, LookupIterator::OWN);
+ if (it.state() != LookupIterator::DATA) return NoChange();
+ Handle<PropertyCell> property_cell = it.GetPropertyCell();
+ PropertyDetails property_details = property_cell->property_details();
+ Handle<Object> property_cell_value(property_cell->value(), isolate());
+
+  // A load from a non-configurable, read-only data property on the global
+  // object can be constant-folded, even without deoptimization support.
+ if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
+ Node* value = jsgraph()->Constant(property_cell_value);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+
+  // A load from a non-configurable data property on the global object can be
+  // lowered to a field load, even without deoptimization, because the
+  // property cannot be deleted or reconfigured to an accessor/interceptor
+  // property. If deoptimization support is available, however, we can
+  // constant-fold certain global properties, or at least lower them to field
+  // loads annotated with more precise type feedback.
+ Type* property_cell_value_type = Type::Tagged();
+ if (flags() & kDeoptimizationEnabled) {
+ // Record a code dependency on the cell if we can benefit from the
+ // additional feedback, or the global property is configurable (i.e.
+ // can be deleted or reconfigured to an accessor property).
+ if (property_details.cell_type() != PropertyCellType::kMutable ||
+ property_details.IsConfigurable()) {
+ dependencies()->AssumePropertyCell(property_cell);
+ }
+
+    // A load from a constant/undefined global property can be constant-folded.
+ if ((property_details.cell_type() == PropertyCellType::kConstant ||
+ property_details.cell_type() == PropertyCellType::kUndefined)) {
+ Node* value = jsgraph()->Constant(property_cell_value);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+
+    // A load from a constant-type property cell can benefit from type feedback.
+ if (property_details.cell_type() == PropertyCellType::kConstantType) {
+ // Compute proper type based on the current value in the cell.
+ if (property_cell_value->IsSmi()) {
+ property_cell_value_type = type_cache_.kSmi;
+ } else if (property_cell_value->IsNumber()) {
+ property_cell_value_type = type_cache_.kHeapNumber;
+ } else {
+ Handle<Map> property_cell_value_map(
+ Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
+ property_cell_value_type =
+ Type::Class(property_cell_value_map, graph()->zone());
+ }
+ }
+ } else if (property_details.IsConfigurable()) {
+ // Access to configurable global properties requires deoptimization support.
+ return NoChange();
+ }
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
+ jsgraph()->HeapConstant(property_cell), effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+
+Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
+ Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Retrieve the global object from the given {node}.
+ Handle<JSGlobalObject> global_object;
+ if (!GetGlobalObject(node).ToHandle(&global_object)) return NoChange();
+
+  // Try to look up the name in the script context table first (lexical scoping).
+ ScriptContextTableLookupResult result;
+ if (LookupInScriptContextTable(global_object, name, &result)) {
+ if (result.context->is_the_hole(result.index)) return NoChange();
+ if (result.immutable) return NoChange();
+ Node* context = jsgraph()->HeapConstant(result.context);
+ effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
+ context, value, context, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
+  // Look it up on the global object instead. We only deal with own data
+  // properties of the global object here (represented as PropertyCells).
+ LookupIterator it(global_object, name, LookupIterator::OWN);
+ if (it.state() != LookupIterator::DATA) return NoChange();
+ Handle<PropertyCell> property_cell = it.GetPropertyCell();
+ PropertyDetails property_details = property_cell->property_details();
+ Handle<Object> property_cell_value(property_cell->value(), isolate());
+
+ // Don't even bother trying to lower stores to read-only data properties.
+ if (property_details.IsReadOnly()) return NoChange();
+ switch (property_details.cell_type()) {
+ case PropertyCellType::kUndefined: {
+ return NoChange();
+ }
+ case PropertyCellType::kConstant: {
+      // A store to a constant property cell requires deoptimization support,
+      // because we might need to eagerly deoptimize on a value mismatch.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ dependencies()->AssumePropertyCell(property_cell);
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
+ jsgraph()->Constant(property_cell_value));
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ break;
+ }
+ case PropertyCellType::kConstantType: {
+      // A store to a constant-type property cell requires deoptimization
+      // support, because we might need to eagerly deoptimize on a mismatch.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ dependencies()->AssumePropertyCell(property_cell);
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Type* property_cell_value_type = Type::TaggedSigned();
+ if (property_cell_value->IsHeapObject()) {
+ // Deoptimize if the {value} is a Smi.
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_true);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfFalse(), branch);
+
+        // Load the {value} map and check it against the {property_cell} map.
+ Node* value_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, effect, control);
+ Handle<Map> property_cell_value_map(
+ Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
+ check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Any()), value_map,
+ jsgraph()->HeapConstant(property_cell_value_map));
+ property_cell_value_type = Type::TaggedPointer();
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
+ jsgraph()->HeapConstant(property_cell), value, effect, control);
+ break;
+ }
+ case PropertyCellType::kMutable: {
+      // A store to a non-configurable data property on the global can be
+      // lowered to a field store, even without deoptimization, because the
+      // property cannot be deleted or reconfigured to an accessor/interceptor
+      // property.
+ if (property_details.IsConfigurable()) {
+ // With deoptimization support, we can lower stores even to configurable
+ // data properties on the global object, by adding a code dependency on
+ // the cell.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ dependencies()->AssumePropertyCell(property_cell);
+ }
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForPropertyCellValue()),
+ jsgraph()->HeapConstant(property_cell), value, effect, control);
+ break;
+ }
+ }
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+
+MaybeHandle<JSGlobalObject> JSGlobalObjectSpecialization::GetGlobalObject(
+ Node* node) {
+ Node* const context = NodeProperties::GetContextInput(node);
+ return NodeProperties::GetSpecializationGlobalObject(context,
+ native_context());
+}
+
+
+bool JSGlobalObjectSpecialization::LookupInScriptContextTable(
+ Handle<JSGlobalObject> global_object, Handle<Name> name,
+ ScriptContextTableLookupResult* result) {
+ if (!name->IsString()) return false;
+ Handle<ScriptContextTable> script_context_table(
+ global_object->native_context()->script_context_table(), isolate());
+ ScriptContextTable::LookupResult lookup_result;
+ if (!ScriptContextTable::Lookup(script_context_table,
+ Handle<String>::cast(name), &lookup_result)) {
+ return false;
+ }
+ Handle<Context> script_context = ScriptContextTable::GetContext(
+ script_context_table, lookup_result.context_index);
+ result->context = script_context;
+ result->immutable = IsImmutableVariableMode(lookup_result.mode);
+ result->index = lookup_result.slot_index;
+ return true;
+}
+
+
+Graph* JSGlobalObjectSpecialization::graph() const {
+ return jsgraph()->graph();
+}
+
+
+Isolate* JSGlobalObjectSpecialization::isolate() const {
+ return jsgraph()->isolate();
+}
+
+
+CommonOperatorBuilder* JSGlobalObjectSpecialization::common() const {
+ return jsgraph()->common();
+}
+
+
+JSOperatorBuilder* JSGlobalObjectSpecialization::javascript() const {
+ return jsgraph()->javascript();
+}
+
+
+SimplifiedOperatorBuilder* JSGlobalObjectSpecialization::simplified() const {
+ return jsgraph()->simplified();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/js-global-object-specialization.h b/src/compiler/js-global-object-specialization.h
new file mode 100644
index 0000000..83d890c
--- /dev/null
+++ b/src/compiler/js-global-object-specialization.h
@@ -0,0 +1,83 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
+#define V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class TypeCache;
+
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+class SimplifiedOperatorBuilder;
+
+
+// Specializes a given JSGraph to a given global object, potentially
+// constant-folding some {JSLoadGlobal} nodes or strength-reducing some
+// {JSStoreGlobal} nodes.
+class JSGlobalObjectSpecialization final : public AdvancedReducer {
+ public:
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
+ MaybeHandle<Context> native_context,
+ CompilationDependencies* dependencies);
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceJSLoadGlobal(Node* node);
+ Reduction ReduceJSStoreGlobal(Node* node);
+
+ // Retrieve the global object from the given {node} if known.
+ MaybeHandle<JSGlobalObject> GetGlobalObject(Node* node);
+
+ struct ScriptContextTableLookupResult;
+ bool LookupInScriptContextTable(Handle<JSGlobalObject> global_object,
+ Handle<Name> name,
+ ScriptContextTableLookupResult* result);
+
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
+ CommonOperatorBuilder* common() const;
+ JSOperatorBuilder* javascript() const;
+ SimplifiedOperatorBuilder* simplified() const;
+ Flags flags() const { return flags_; }
+ MaybeHandle<Context> native_context() const { return native_context_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+
+ JSGraph* const jsgraph_;
+ Flags const flags_;
+ MaybeHandle<Context> native_context_;
+ CompilationDependencies* const dependencies_;
+ TypeCache const& type_cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSGlobalObjectSpecialization);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(JSGlobalObjectSpecialization::Flags)
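+
+// DEFINE_OPERATORS_FOR_FLAGS (from src/base/flags.h) generates the bitwise
+// operators that allow combining individual {Flag} values into a {Flags}
+// set, e.g. the (flags() & kDeoptimizationEnabled) tests in the
+// corresponding .cc file.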
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index 7759ba1..e938798 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -4,110 +4,82 @@
#include "src/code-stubs.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/typer.h"
namespace v8 {
namespace internal {
namespace compiler {
-Node* JSGraph::ImmovableHeapConstant(Handle<HeapObject> object) {
- Unique<HeapObject> unique = Unique<HeapObject>::CreateImmovable(object);
- return graph()->NewNode(common()->HeapConstant(unique));
-}
+#define CACHED(name, expr) \
+ cached_nodes_[name] ? cached_nodes_[name] : (cached_nodes_[name] = (expr))
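+//
+// CACHED performs a lazy lookup-or-create on the {cached_nodes_} table: the
+// slot is filled on first use and the same node is returned on every later
+// call. Illustrative expansion for one entry (for exposition only):
+//
+//   cached_nodes_[kTrueConstant]
+//       ? cached_nodes_[kTrueConstant]
+//       : (cached_nodes_[kTrueConstant] =
+//              (HeapConstant(factory()->true_value())))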
Node* JSGraph::CEntryStubConstant(int result_size) {
if (result_size == 1) {
- if (!c_entry_stub_constant_.is_set()) {
- c_entry_stub_constant_.set(
- ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
- }
- return c_entry_stub_constant_.get();
+ return CACHED(kCEntryStubConstant,
+ HeapConstant(CEntryStub(isolate(), 1).GetCode()));
}
+ return HeapConstant(CEntryStub(isolate(), result_size).GetCode());
+}
- return ImmovableHeapConstant(CEntryStub(isolate(), result_size).GetCode());
+
+Node* JSGraph::EmptyFixedArrayConstant() {
+ return CACHED(kEmptyFixedArrayConstant,
+ HeapConstant(factory()->empty_fixed_array()));
}
Node* JSGraph::UndefinedConstant() {
- if (!undefined_constant_.is_set()) {
- undefined_constant_.set(
- ImmovableHeapConstant(factory()->undefined_value()));
- }
- return undefined_constant_.get();
+ return CACHED(kUndefinedConstant, HeapConstant(factory()->undefined_value()));
}
Node* JSGraph::TheHoleConstant() {
- if (!the_hole_constant_.is_set()) {
- the_hole_constant_.set(ImmovableHeapConstant(factory()->the_hole_value()));
- }
- return the_hole_constant_.get();
+ return CACHED(kTheHoleConstant, HeapConstant(factory()->the_hole_value()));
}
Node* JSGraph::TrueConstant() {
- if (!true_constant_.is_set()) {
- true_constant_.set(ImmovableHeapConstant(factory()->true_value()));
- }
- return true_constant_.get();
+ return CACHED(kTrueConstant, HeapConstant(factory()->true_value()));
}
Node* JSGraph::FalseConstant() {
- if (!false_constant_.is_set()) {
- false_constant_.set(ImmovableHeapConstant(factory()->false_value()));
- }
- return false_constant_.get();
+ return CACHED(kFalseConstant, HeapConstant(factory()->false_value()));
}
Node* JSGraph::NullConstant() {
- if (!null_constant_.is_set()) {
- null_constant_.set(ImmovableHeapConstant(factory()->null_value()));
- }
- return null_constant_.get();
+ return CACHED(kNullConstant, HeapConstant(factory()->null_value()));
}
Node* JSGraph::ZeroConstant() {
- if (!zero_constant_.is_set()) zero_constant_.set(NumberConstant(0.0));
- return zero_constant_.get();
+ return CACHED(kZeroConstant, NumberConstant(0.0));
}
Node* JSGraph::OneConstant() {
- if (!one_constant_.is_set()) one_constant_.set(NumberConstant(1.0));
- return one_constant_.get();
+ return CACHED(kOneConstant, NumberConstant(1.0));
}
Node* JSGraph::NaNConstant() {
- if (!nan_constant_.is_set()) {
- nan_constant_.set(NumberConstant(base::OS::nan_value()));
- }
- return nan_constant_.get();
-}
-
-
-Node* JSGraph::HeapConstant(Unique<HeapObject> value) {
- // TODO(turbofan): canonicalize heap constants using Unique<T>
- return graph()->NewNode(common()->HeapConstant(value));
+ return CACHED(kNaNConstant,
+ NumberConstant(std::numeric_limits<double>::quiet_NaN()));
}
Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
- // TODO(titzer): We could also match against the addresses of immortable
- // immovables here, even without access to the heap, thus always
- // canonicalizing references to them.
- // return HeapConstant(Unique<Object>::CreateUninitialized(value));
- // TODO(turbofan): This is a work-around to make Unique::HashCode() work for
- // value numbering. We need some sane way to compute a unique hash code for
- // arbitrary handles here.
- Unique<HeapObject> unique(reinterpret_cast<Address>(*value.location()),
- value);
- return HeapConstant(unique);
+ if (value->IsConsString()) {
+ value = String::Flatten(Handle<String>::cast(value), TENURED);
+ }
+ Node** loc = cache_.FindHeapConstant(value);
+ if (*loc == nullptr) {
+ *loc = graph()->NewNode(common()->HeapConstant(value));
+ }
+ return *loc;
}
@@ -148,7 +120,7 @@
Node* JSGraph::Int32Constant(int32_t value) {
Node** loc = cache_.FindInt32Constant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->Int32Constant(value));
}
return *loc;
@@ -157,7 +129,7 @@
Node* JSGraph::Int64Constant(int64_t value) {
Node** loc = cache_.FindInt64Constant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->Int64Constant(value));
}
return *loc;
@@ -166,7 +138,7 @@
Node* JSGraph::NumberConstant(double value) {
Node** loc = cache_.FindNumberConstant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->NumberConstant(value));
}
return *loc;
@@ -175,7 +147,7 @@
Node* JSGraph::Float32Constant(float value) {
Node** loc = cache_.FindFloat32Constant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->Float32Constant(value));
}
return *loc;
@@ -184,7 +156,7 @@
Node* JSGraph::Float64Constant(double value) {
Node** loc = cache_.FindFloat64Constant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->Float64Constant(value));
}
return *loc;
@@ -193,21 +165,44 @@
Node* JSGraph::ExternalConstant(ExternalReference reference) {
Node** loc = cache_.FindExternalConstant(reference);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->ExternalConstant(reference));
}
return *loc;
}
+Node* JSGraph::ExternalConstant(Runtime::FunctionId function_id) {
+ return ExternalConstant(ExternalReference(function_id, isolate()));
+}
+
+
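+// Note: EmptyFrameState deliberately does not use the CACHED macro above,
+// because the cached node can become dead; in that case the frame state is
+// rebuilt from scratch instead of handing out the dead node.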
+Node* JSGraph::EmptyFrameState() {
+ Node* empty_frame_state = cached_nodes_[kEmptyFrameState];
+ if (!empty_frame_state || empty_frame_state->IsDead()) {
+ Node* state_values = graph()->NewNode(common()->StateValues(0));
+ empty_frame_state = graph()->NewNode(
+ common()->FrameState(BailoutId::None(),
+ OutputFrameStateCombine::Ignore(), nullptr),
+ state_values, state_values, state_values, NoContextConstant(),
+ UndefinedConstant(), graph()->start());
+ cached_nodes_[kEmptyFrameState] = empty_frame_state;
+ }
+ return empty_frame_state;
+}
+
+
+Node* JSGraph::Dead() {
+ return CACHED(kDead, graph()->NewNode(common()->Dead()));
+}
+
+
void JSGraph::GetCachedNodes(NodeVector* nodes) {
cache_.GetCachedNodes(nodes);
- SetOncePointer<Node>* ptrs[] = {
- &c_entry_stub_constant_, &undefined_constant_, &the_hole_constant_,
- &true_constant_, &false_constant_, &null_constant_,
- &zero_constant_, &one_constant_, &nan_constant_};
- for (size_t i = 0; i < arraysize(ptrs); i++) {
- if (ptrs[i]->is_set()) nodes->push_back(ptrs[i]->get());
+ for (size_t i = 0; i < arraysize(cached_nodes_); i++) {
+ if (Node* node = cached_nodes_[i]) {
+ if (!node->IsDead()) nodes->push_back(node);
+ }
}
}
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index 040a745..5a25ed0 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -11,28 +11,36 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
namespace compiler {
+class SimplifiedOperatorBuilder;
class Typer;
// Implements a facade on a Graph, enhancing the graph with JS-specific
-// notions, including a builder for for JS* operators, canonicalized global
+// notions, including various builders for operators, canonicalized global
// constants, and various helper methods.
class JSGraph : public ZoneObject {
public:
- JSGraph(Graph* graph, CommonOperatorBuilder* common,
- JSOperatorBuilder* javascript, MachineOperatorBuilder* machine)
- : graph_(graph),
+ JSGraph(Isolate* isolate, Graph* graph, CommonOperatorBuilder* common,
+ JSOperatorBuilder* javascript, SimplifiedOperatorBuilder* simplified,
+ MachineOperatorBuilder* machine)
+ : isolate_(isolate),
+ graph_(graph),
common_(common),
javascript_(javascript),
+ simplified_(simplified),
machine_(machine),
- cache_(zone()) {}
+ cache_(zone()) {
+ for (int i = 0; i < kNumCachedNodes; i++) cached_nodes_[i] = nullptr;
+ }
// Canonicalized global constants.
Node* CEntryStubConstant(int result_size);
+ Node* EmptyFixedArrayConstant();
Node* UndefinedConstant();
Node* TheHoleConstant();
Node* TrueConstant();
@@ -42,10 +50,6 @@
Node* OneConstant();
Node* NaNConstant();
- // Creates a HeapConstant node, possibly canonicalized, without inspecting the
- // object.
- Node* HeapConstant(Unique<HeapObject> value);
-
// Creates a HeapConstant node, possibly canonicalized, and may access the
// heap to inspect the object.
Node* HeapConstant(Handle<HeapObject> value);
@@ -99,6 +103,7 @@
// Creates an ExternalConstant node, usually canonicalized.
Node* ExternalConstant(ExternalReference ref);
+ Node* ExternalConstant(Runtime::FunctionId function_id);
Node* SmiConstant(int32_t immediate) {
DCHECK(Smi::IsValid(immediate));
@@ -109,36 +114,50 @@
// stubs and runtime functions that do not require a context.
Node* NoContextConstant() { return ZeroConstant(); }
- JSOperatorBuilder* javascript() { return javascript_; }
- CommonOperatorBuilder* common() { return common_; }
- MachineOperatorBuilder* machine() { return machine_; }
- Graph* graph() { return graph_; }
- Zone* zone() { return graph()->zone(); }
- Isolate* isolate() { return zone()->isolate(); }
- Factory* factory() { return isolate()->factory(); }
+  // Creates an empty frame state for cases where we know that a function
+  // cannot deopt.
+ Node* EmptyFrameState();
+
+  // Creates a control node that serves as a dependency for dead nodes.
+ Node* Dead();
+
+ CommonOperatorBuilder* common() const { return common_; }
+ JSOperatorBuilder* javascript() const { return javascript_; }
+ SimplifiedOperatorBuilder* simplified() const { return simplified_; }
+ MachineOperatorBuilder* machine() const { return machine_; }
+ Graph* graph() const { return graph_; }
+ Zone* zone() const { return graph()->zone(); }
+ Isolate* isolate() const { return isolate_; }
+ Factory* factory() const { return isolate()->factory(); }
void GetCachedNodes(NodeVector* nodes);
private:
+ enum CachedNode {
+ kCEntryStubConstant,
+ kEmptyFixedArrayConstant,
+ kUndefinedConstant,
+ kTheHoleConstant,
+ kTrueConstant,
+ kFalseConstant,
+ kNullConstant,
+ kZeroConstant,
+ kOneConstant,
+ kNaNConstant,
+ kEmptyFrameState,
+ kDead,
+ kNumCachedNodes // Must remain last.
+ };
+
+ Isolate* isolate_;
Graph* graph_;
CommonOperatorBuilder* common_;
JSOperatorBuilder* javascript_;
+ SimplifiedOperatorBuilder* simplified_;
MachineOperatorBuilder* machine_;
-
- // TODO(titzer): make this into a simple array.
- SetOncePointer<Node> c_entry_stub_constant_;
- SetOncePointer<Node> undefined_constant_;
- SetOncePointer<Node> the_hole_constant_;
- SetOncePointer<Node> true_constant_;
- SetOncePointer<Node> false_constant_;
- SetOncePointer<Node> null_constant_;
- SetOncePointer<Node> zero_constant_;
- SetOncePointer<Node> one_constant_;
- SetOncePointer<Node> nan_constant_;
-
CommonNodeCache cache_;
+ Node* cached_nodes_[kNumCachedNodes];
- Node* ImmovableHeapConstant(Handle<HeapObject> value);
Node* NumberConstant(double value);
DISALLOW_COPY_AND_ASSIGN(JSGraph);
diff --git a/src/compiler/js-inlining-heuristic.cc b/src/compiler/js-inlining-heuristic.cc
new file mode 100644
index 0000000..cd5637b
--- /dev/null
+++ b/src/compiler/js-inlining-heuristic.cc
@@ -0,0 +1,141 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-inlining-heuristic.h"
+
+#include "src/compiler.h"
+#include "src/compiler/node-matchers.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction JSInliningHeuristic::Reduce(Node* node) {
+ if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+
+  // Check if we have seen this {node} before, and if so, skip it.
+ if (seen_.find(node->id()) != seen_.end()) return NoChange();
+ seen_.insert(node->id());
+
+ Node* callee = node->InputAt(0);
+ HeapObjectMatcher match(callee);
+ if (!match.HasValue() || !match.Value()->IsJSFunction()) return NoChange();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+
+ // Functions marked with %SetForceInlineFlag are immediately inlined.
+ if (function->shared()->force_inline()) {
+ return inliner_.ReduceJSCall(node, function);
+ }
+
+  // Handle the special inlining modes right away:
+  //  - For restricted inlining: stop all handling at this point.
+  //  - For stress inlining: immediately handle all functions.
+ switch (mode_) {
+ case kRestrictedInlining:
+ return NoChange();
+ case kStressInlining:
+ return inliner_.ReduceJSCall(node, function);
+ case kGeneralInlining:
+ break;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Everything below this line is part of the inlining heuristic.
+ // ---------------------------------------------------------------------------
+
+ // Built-in functions are handled by the JSBuiltinReducer.
+ if (function->shared()->HasBuiltinFunctionId()) return NoChange();
+
+ // Don't inline builtins.
+ if (function->shared()->IsBuiltin()) return NoChange();
+
+  // Quick check on the source code length to avoid parsing large candidates.
+ if (function->shared()->SourceSize() > FLAG_max_inlined_source_size) {
+ return NoChange();
+ }
+
+  // Quick check on the size of the AST to avoid parsing large candidates.
+ if (function->shared()->ast_node_count() > FLAG_max_inlined_nodes) {
+ return NoChange();
+ }
+
+ // Avoid inlining within or across the boundary of asm.js code.
+ if (info_->shared_info()->asm_function()) return NoChange();
+ if (function->shared()->asm_function()) return NoChange();
+
+  // Stop inlining once the maximum allowed level is reached.
+ int level = 0;
+ for (Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ frame_state->opcode() == IrOpcode::kFrameState;
+ frame_state = NodeProperties::GetFrameStateInput(frame_state, 0)) {
+ if (++level > FLAG_max_inlining_levels) return NoChange();
+ }
+
+ // Gather feedback on how often this call site has been hit before.
+ int calls = -1; // Same default as CallICNexus::ExtractCallCount.
+ // TODO(turbofan): We also want call counts for constructor calls.
+ if (node->opcode() == IrOpcode::kJSCallFunction) {
+ CallFunctionParameters p = CallFunctionParametersOf(node->op());
+ if (p.feedback().IsValid()) {
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ calls = nexus.ExtractCallCount();
+ }
+ }
+
+ // ---------------------------------------------------------------------------
+ // Everything above this line is part of the inlining heuristic.
+ // ---------------------------------------------------------------------------
+
+ // In the general case we remember the candidate for later.
+ candidates_.insert({function, node, calls});
+ return NoChange();
+}
+
+
+void JSInliningHeuristic::Finalize() {
+ if (candidates_.empty()) return; // Nothing to do without candidates.
+ if (FLAG_trace_turbo_inlining) PrintCandidates();
+
+ // We inline at most one candidate in every iteration of the fixpoint.
+ // This is to ensure that we don't consume the full inlining budget
+ // on things that aren't called very often.
+ // TODO(bmeurer): Use std::priority_queue instead of std::set here.
+ while (!candidates_.empty()) {
+ if (cumulative_count_ > FLAG_max_inlined_nodes_cumulative) return;
+ auto i = candidates_.begin();
+ Candidate candidate = *i;
+ candidates_.erase(i);
+ // Make sure we don't try to inline dead candidate nodes.
+ if (!candidate.node->IsDead()) {
+ Reduction r = inliner_.ReduceJSCall(candidate.node, candidate.function);
+ if (r.Changed()) {
+ cumulative_count_ += candidate.function->shared()->ast_node_count();
+ return;
+ }
+ }
+ }
+}
+
+
+bool JSInliningHeuristic::CandidateCompare::operator()(
+    const Candidate& left, const Candidate& right) const {
+  // Order candidates by descending call count; break ties by node id so the
+  // comparator is a strict weak ordering (as required by std::set), while
+  // two candidates for the same node still compare equivalent.
+  if (left.calls != right.calls) return left.calls > right.calls;
+  return left.node->id() < right.node->id();
+}
+
+
+void JSInliningHeuristic::PrintCandidates() {
+ PrintF("Candidates for inlining (size=%zu):\n", candidates_.size());
+ for (const Candidate& candidate : candidates_) {
+ PrintF(" id:%d, calls:%d, size[source]:%d, size[ast]:%d / %s\n",
+ candidate.node->id(), candidate.calls,
+ candidate.function->shared()->SourceSize(),
+ candidate.function->shared()->ast_node_count(),
+ candidate.function->shared()->DebugName()->ToCString().get());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/js-inlining-heuristic.h b/src/compiler/js-inlining-heuristic.h
new file mode 100644
index 0000000..7f57747
--- /dev/null
+++ b/src/compiler/js-inlining-heuristic.h
@@ -0,0 +1,62 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_INLINING_HEURISTIC_H_
+#define V8_COMPILER_JS_INLINING_HEURISTIC_H_
+
+#include "src/compiler/js-inlining.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSInliningHeuristic final : public AdvancedReducer {
+ public:
+ enum Mode { kGeneralInlining, kRestrictedInlining, kStressInlining };
+ JSInliningHeuristic(Editor* editor, Mode mode, Zone* local_zone,
+ CompilationInfo* info, JSGraph* jsgraph)
+ : AdvancedReducer(editor),
+ mode_(mode),
+ inliner_(editor, local_zone, info, jsgraph),
+ candidates_(local_zone),
+ seen_(local_zone),
+ info_(info) {}
+
+ Reduction Reduce(Node* node) final;
+
+ // Processes the list of candidates gathered while the reducer was running,
+ // and inlines call sites that the heuristic determines to be important.
+ void Finalize() final;
+
+ private:
+ struct Candidate {
+ Handle<JSFunction> function; // The call target being inlined.
+ Node* node; // The call site at which to inline.
+ int calls; // Number of times the call site was hit.
+ };
+
+ // Comparator for candidates.
+ struct CandidateCompare {
+ bool operator()(const Candidate& left, const Candidate& right) const;
+ };
+
+ // Candidates are kept in a sorted set of unique candidates.
+ typedef ZoneSet<Candidate, CandidateCompare> Candidates;
+
+ // Dumps candidates to console.
+ void PrintCandidates();
+
+ Mode const mode_;
+ JSInliner inliner_;
+ Candidates candidates_;
+ ZoneSet<NodeId> seen_;
+ CompilationInfo* info_;
+ int cumulative_count_ = 0;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_INLINING_HEURISTIC_H_
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index d143382..99a1547 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -2,270 +2,187 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast.h"
-#include "src/ast-numbering.h"
-#include "src/compiler/access-builder.h"
+#include "src/compiler/js-inlining.h"
+
+#include "src/ast/ast.h"
+#include "src/ast/ast-numbering.h"
+#include "src/ast/scopes.h"
+#include "src/compiler.h"
+#include "src/compiler/all-nodes.h"
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-inl.h"
-#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/js-inlining.h"
-#include "src/compiler/js-intrinsic-builder.h"
+#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
-#include "src/compiler/node-aux-data-inl.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/compiler/typer.h"
-#include "src/full-codegen.h"
-#include "src/parser.h"
-#include "src/rewriter.h"
-#include "src/scopes.h"
-
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/isolate-inl.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
namespace v8 {
namespace internal {
namespace compiler {
-class InlinerVisitor : public NullNodeVisitor {
- public:
- explicit InlinerVisitor(JSInliner* inliner) : inliner_(inliner) {}
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_inlining) PrintF(__VA_ARGS__); \
+ } while (false)
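+
+// The do { ... } while (false) wrapper makes TRACE(...) expand to a single
+// statement, so the macro composes safely with unbraced if/else, e.g.:
+//
+//   if (cond) TRACE("reduced\n"); else HandleOther();
+//
+// ({cond} and HandleOther are placeholders for exposition only.)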
- void Post(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kJSCallFunction:
- inliner_->TryInlineJSCall(node);
- break;
- case IrOpcode::kJSCallRuntime:
- if (FLAG_turbo_inlining_intrinsics) {
- inliner_->TryInlineRuntimeCall(node);
- }
- break;
- default:
- break;
- }
+
+// Provides convenience accessors for the common layout of nodes having either
+// the {JSCallFunction} or the {JSCallConstruct} operator.
+class JSCallAccessor {
+ public:
+ explicit JSCallAccessor(Node* call) : call_(call) {
+ DCHECK(call->opcode() == IrOpcode::kJSCallFunction ||
+ call->opcode() == IrOpcode::kJSCallConstruct);
+ }
+
+ Node* target() {
+    // Both {JSCallFunction} and {JSCallConstruct} have the same layout here.
+ return call_->InputAt(0);
+ }
+
+ Node* receiver() {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, call_->opcode());
+ return call_->InputAt(1);
+ }
+
+ Node* new_target() {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, call_->opcode());
+ return call_->InputAt(formal_arguments() + 1);
+ }
+
+ Node* frame_state_before() {
+ return NodeProperties::GetFrameStateInput(call_, 1);
+ }
+
+ Node* frame_state_after() {
+    // Both {JSCallFunction} and {JSCallConstruct} have a frame state after.
+ return NodeProperties::GetFrameStateInput(call_, 0);
+ }
+
+ int formal_arguments() {
+    // Both {JSCallFunction} and {JSCallConstruct} have two extra value
+    // inputs besides the actual arguments:
+    //  - JSCallConstruct: the target function and the new target.
+    //  - JSCallFunction: the target function and the receiver.
+ return call_->op()->ValueInputCount() - 2;
}
private:
- JSInliner* inliner_;
+ Node* call_;
};
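+
+// For reference, the value input layout assumed by the accessors above
+// (derived from the accessors themselves, shown here for exposition):
+//
+//   JSCallFunction:  target, receiver, arg0, ..., argN-1
+//   JSCallConstruct: target, arg0, ..., argN-1, new.target
+//
+// which is why formal_arguments() subtracts the two extra value inputs.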
-void JSInliner::Inline() {
- InlinerVisitor visitor(this);
- jsgraph_->graph()->VisitNodeInputsFromEnd(&visitor);
-}
-
-
-// A facade on a JSFunction's graph to facilitate inlining. It assumes the
-// that the function graph has only one return statement, and provides
-// {UnifyReturn} to convert a function graph to that end.
-class Inlinee {
- public:
- Inlinee(Node* start, Node* end) : start_(start), end_(end) {}
-
- // Returns the last regular control node, that is
- // the last control node before the end node.
- Node* end_block() { return NodeProperties::GetControlInput(unique_return()); }
-
- // Return the effect output of the graph,
- // that is the effect input of the return statement of the inlinee.
- Node* effect_output() {
- return NodeProperties::GetEffectInput(unique_return());
- }
- // Return the value output of the graph,
- // that is the value input of the return statement of the inlinee.
- Node* value_output() {
- return NodeProperties::GetValueInput(unique_return(), 0);
- }
- // Return the unique return statement of the graph.
- Node* unique_return() {
- Node* unique_return = NodeProperties::GetControlInput(end_);
- DCHECK_EQ(IrOpcode::kReturn, unique_return->opcode());
- return unique_return;
- }
-
- // Counts JSFunction, Receiver, arguments, context but not effect, control.
- size_t total_parameters() { return start_->op()->ValueOutputCount(); }
-
- // Counts only formal parameters.
- size_t formal_parameters() {
- DCHECK_GE(total_parameters(), 3);
- return total_parameters() - 3;
- }
-
- // Inline this graph at {call}, use {jsgraph} and its zone to create
- // any new nodes.
- void InlineAtCall(JSGraph* jsgraph, Node* call);
-
- // Ensure that only a single return reaches the end node.
- static void UnifyReturn(JSGraph* jsgraph);
-
- private:
- Node* start_;
- Node* end_;
-};
-
-
-void Inlinee::UnifyReturn(JSGraph* jsgraph) {
- Graph* graph = jsgraph->graph();
-
- Node* final_merge = NodeProperties::GetControlInput(graph->end(), 0);
- if (final_merge->opcode() == IrOpcode::kReturn) {
- // nothing to do
- return;
- }
- DCHECK_EQ(IrOpcode::kMerge, final_merge->opcode());
-
- int predecessors = final_merge->op()->ControlInputCount();
-
- const Operator* op_phi = jsgraph->common()->Phi(kMachAnyTagged, predecessors);
- const Operator* op_ephi = jsgraph->common()->EffectPhi(predecessors);
-
- NodeVector values(jsgraph->zone());
- NodeVector effects(jsgraph->zone());
- // Iterate over all control flow predecessors,
- // which must be return statements.
- for (Edge edge : final_merge->input_edges()) {
- Node* input = edge.to();
- switch (input->opcode()) {
- case IrOpcode::kReturn:
- values.push_back(NodeProperties::GetValueInput(input, 0));
- effects.push_back(NodeProperties::GetEffectInput(input));
- edge.UpdateTo(NodeProperties::GetControlInput(input));
- input->RemoveAllInputs();
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- values.push_back(final_merge);
- effects.push_back(final_merge);
- Node* phi =
- graph->NewNode(op_phi, static_cast<int>(values.size()), &values.front());
- Node* ephi = graph->NewNode(op_ephi, static_cast<int>(effects.size()),
- &effects.front());
- Node* new_return =
- graph->NewNode(jsgraph->common()->Return(), phi, ephi, final_merge);
- graph->end()->ReplaceInput(0, new_return);
-}
-
-
-class CopyVisitor : public NullNodeVisitor {
+class CopyVisitor {
public:
CopyVisitor(Graph* source_graph, Graph* target_graph, Zone* temp_zone)
- : copies_(source_graph->NodeCount(), NULL, temp_zone),
- sentinels_(source_graph->NodeCount(), NULL, temp_zone),
+ : sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, "Sentinel", 0, 0,
+ 0, 0, 0, 0),
+ sentinel_(target_graph->NewNode(&sentinel_op_)),
+ copies_(source_graph->NodeCount(), sentinel_, temp_zone),
source_graph_(source_graph),
target_graph_(target_graph),
- temp_zone_(temp_zone),
- sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, "sentinel", 0, 0,
- 0, 0, 0, 0) {}
+ temp_zone_(temp_zone) {}
- void Post(Node* original) {
- NodeVector inputs(temp_zone_);
- for (Node* const node : original->inputs()) {
- inputs.push_back(GetCopy(node));
- }
-
- // Reuse the operator in the copy. This assumes that op lives in a zone
- // that lives longer than graph()'s zone.
- Node* copy =
- target_graph_->NewNode(original->op(), static_cast<int>(inputs.size()),
- (inputs.empty() ? NULL : &inputs.front()));
- copies_[original->id()] = copy;
- }
-
- Node* GetCopy(Node* original) {
- Node* copy = copies_[original->id()];
- if (copy == NULL) {
- copy = GetSentinel(original);
- }
- DCHECK_NE(NULL, copy);
- return copy;
- }
+ Node* GetCopy(Node* orig) { return copies_[orig->id()]; }
void CopyGraph() {
- source_graph_->VisitNodeInputsFromEnd(this);
- ReplaceSentinels();
+ NodeVector inputs(temp_zone_);
+ // TODO(bmeurer): AllNodes should be turned into something like
+ // Graph::CollectNodesReachableFromEnd() and the gray set stuff should be
+ // removed since it's only needed by the visualizer.
+ AllNodes all(temp_zone_, source_graph_);
+ // Copy all nodes reachable from end.
+ for (Node* orig : all.live) {
+ Node* copy = GetCopy(orig);
+ if (copy != sentinel_) {
+ // Mapping already exists.
+ continue;
+ }
+ // Copy the node.
+ inputs.clear();
+ for (Node* input : orig->inputs()) inputs.push_back(copies_[input->id()]);
+ copy = target_graph_->NewNode(orig->op(), orig->InputCount(),
+ inputs.empty() ? nullptr : &inputs[0]);
+ copies_[orig->id()] = copy;
+ }
+    // Patch up inputs that still point at the sentinel: those producers were
+    // only copied after the node that uses them in the pass above.
+ for (Node* orig : all.live) {
+ Node* copy = copies_[orig->id()];
+ for (int i = 0; i < copy->InputCount(); ++i) {
+ Node* input = copy->InputAt(i);
+ if (input == sentinel_) {
+ copy->ReplaceInput(i, GetCopy(orig->InputAt(i)));
+ }
+ }
+ }
}
- const NodeVector& copies() { return copies_; }
+ const NodeVector& copies() const { return copies_; }
private:
- void ReplaceSentinels() {
- for (NodeId id = 0; id < source_graph_->NodeCount(); ++id) {
- Node* sentinel = sentinels_[id];
- if (sentinel == NULL) continue;
- Node* copy = copies_[id];
- DCHECK_NE(NULL, copy);
- sentinel->ReplaceUses(copy);
- }
- }
-
- Node* GetSentinel(Node* original) {
- if (sentinels_[original->id()] == NULL) {
- sentinels_[original->id()] = target_graph_->NewNode(&sentinel_op_);
- }
- return sentinels_[original->id()];
- }
-
+ Operator const sentinel_op_;
+ Node* const sentinel_;
NodeVector copies_;
- NodeVector sentinels_;
- Graph* source_graph_;
- Graph* target_graph_;
- Zone* temp_zone_;
- Operator sentinel_op_;
+ Graph* const source_graph_;
+ Graph* const target_graph_;
+ Zone* const temp_zone_;
};
-void Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
+Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
+ Node* frame_state, Node* start, Node* end) {
// The scheduler is smart enough to place our code; we just ensure {control}
- // becomes the control input of the start of the inlinee.
+ // becomes the control input of the start of the inlinee, and {effect} becomes
+ // the effect input of the start of the inlinee.
Node* control = NodeProperties::GetControlInput(call);
+ Node* effect = NodeProperties::GetEffectInput(call);
- // The inlinee uses the context from the JSFunction object. This will
- // also be the effect dependency for the inlinee as it produces an effect.
- SimplifiedOperatorBuilder simplified(jsgraph->zone());
- Node* context = jsgraph->graph()->NewNode(
- simplified.LoadField(AccessBuilder::ForJSFunctionContext()),
- NodeProperties::GetValueInput(call, 0),
- NodeProperties::GetEffectInput(call), control);
+ int const inlinee_new_target_index =
+ static_cast<int>(start->op()->ValueOutputCount()) - 3;
+ int const inlinee_arity_index =
+ static_cast<int>(start->op()->ValueOutputCount()) - 2;
+ int const inlinee_context_index =
+ static_cast<int>(start->op()->ValueOutputCount()) - 1;
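+  // The start node's value outputs are laid out as
+  //   closure, receiver, param0, ..., paramN-1, new.target, #args, context
+  // (a sketch inferred from the projection handling below), so the three
+  // special indices above count back from the end of that list.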
- // Context is last argument.
- int inlinee_context_index = static_cast<int>(total_parameters()) - 1;
- // {inliner_inputs} counts JSFunction, Receiver, arguments, but not
- // context, effect, control.
+ // {inliner_inputs} counts JSFunction, receiver, arguments, but not
+ // new target value, argument count, context, effect or control.
int inliner_inputs = call->op()->ValueInputCount();
// Iterate over all uses of the start node.
- for (Edge edge : start_->use_edges()) {
+ for (Edge edge : start->use_edges()) {
Node* use = edge.from();
switch (use->opcode()) {
case IrOpcode::kParameter: {
- int index = 1 + OpParameter<int>(use->op());
- if (index < inliner_inputs && index < inlinee_context_index) {
+ int index = 1 + ParameterIndexOf(use->op());
+ DCHECK_LE(index, inlinee_context_index);
+ if (index < inliner_inputs && index < inlinee_new_target_index) {
// There is an input from the call, and the index is a value
// projection but not the context, so rewire the input.
- NodeProperties::ReplaceWithValue(use, call->InputAt(index));
+ Replace(use, call->InputAt(index));
+ } else if (index == inlinee_new_target_index) {
+ // The projection is requesting the new target value.
+ Replace(use, new_target);
+ } else if (index == inlinee_arity_index) {
+ // The projection is requesting the number of arguments.
+ Replace(use, jsgraph_->Int32Constant(inliner_inputs - 2));
} else if (index == inlinee_context_index) {
- // This is the context projection, rewire it to the context from the
- // JSFunction object.
- NodeProperties::ReplaceWithValue(use, context);
- } else if (index < inlinee_context_index) {
- // Call has fewer arguments than required, fill with undefined.
- NodeProperties::ReplaceWithValue(use, jsgraph->UndefinedConstant());
+ // The projection is requesting the inlinee function context.
+ Replace(use, context);
} else {
- // We got too many arguments, discard for now.
- // TODO(sigurds): Fix to treat arguments array correctly.
+ // Call has fewer arguments than required, fill with undefined.
+ Replace(use, jsgraph_->UndefinedConstant());
}
break;
}
default:
if (NodeProperties::IsEffectEdge(edge)) {
- edge.UpdateTo(context);
+ edge.UpdateTo(effect);
} else if (NodeProperties::IsControlEdge(edge)) {
edge.UpdateTo(control);
+ } else if (NodeProperties::IsFrameStateEdge(edge)) {
+ edge.UpdateTo(frame_state);
} else {
UNREACHABLE();
}
@@ -273,65 +190,69 @@
}
}
- NodeProperties::ReplaceWithValue(call, value_output(), effect_output());
- call->RemoveAllInputs();
- DCHECK_EQ(0, call->UseCount());
+ NodeVector values(local_zone_);
+ NodeVector effects(local_zone_);
+ NodeVector controls(local_zone_);
+ for (Node* const input : end->inputs()) {
+ switch (input->opcode()) {
+ case IrOpcode::kReturn:
+ values.push_back(NodeProperties::GetValueInput(input, 0));
+ effects.push_back(NodeProperties::GetEffectInput(input));
+ controls.push_back(NodeProperties::GetControlInput(input));
+ break;
+ case IrOpcode::kDeoptimize:
+ case IrOpcode::kTerminate:
+ case IrOpcode::kThrow:
+ NodeProperties::MergeControlToEnd(jsgraph_->graph(), jsgraph_->common(),
+ input);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ DCHECK_EQ(values.size(), effects.size());
+ DCHECK_EQ(values.size(), controls.size());
+
+ // Depending on whether the inlinee produces a value, we either replace value
+ // uses with said value or kill value uses if no value can be returned.
+ if (values.size() > 0) {
+ int const input_count = static_cast<int>(controls.size());
+ Node* control_output = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->Merge(input_count), input_count, &controls.front());
+ values.push_back(control_output);
+ effects.push_back(control_output);
+ Node* value_output = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->Phi(MachineRepresentation::kTagged, input_count),
+ static_cast<int>(values.size()), &values.front());
+ Node* effect_output = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->EffectPhi(input_count),
+ static_cast<int>(effects.size()), &effects.front());
+ ReplaceWithValue(call, value_output, effect_output, control_output);
+ return Changed(value_output);
+ } else {
+ ReplaceWithValue(call, call, call, jsgraph_->Dead());
+ return Changed(call);
+ }
}
-// TODO(turbofan) Provide such accessors for every node, possibly even
-// generate them.
-class JSCallFunctionAccessor {
- public:
- explicit JSCallFunctionAccessor(Node* call) : call_(call) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, call->opcode());
- }
+Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
+ int parameter_count,
+ FrameStateType frame_state_type,
+ Handle<SharedFunctionInfo> shared) {
+ const FrameStateFunctionInfo* state_info =
+ jsgraph_->common()->CreateFrameStateFunctionInfo(
+ frame_state_type, parameter_count + 1, 0, shared,
+ CALL_MAINTAINS_NATIVE_CONTEXT);
- Node* jsfunction() { return call_->InputAt(0); }
-
- Node* receiver() { return call_->InputAt(1); }
-
- Node* formal_argument(size_t index) {
- DCHECK(index < formal_arguments());
- return call_->InputAt(static_cast<int>(2 + index));
- }
-
- size_t formal_arguments() {
- // {value_inputs} includes jsfunction and receiver.
- size_t value_inputs = call_->op()->ValueInputCount();
- DCHECK_GE(call_->InputCount(), 2);
- return value_inputs - 2;
- }
-
- Node* frame_state() { return NodeProperties::GetFrameStateInput(call_); }
-
- private:
- Node* call_;
-};
-
-
-void JSInliner::AddClosureToFrameState(Node* frame_state,
- Handle<JSFunction> jsfunction) {
- FrameStateCallInfo call_info = OpParameter<FrameStateCallInfo>(frame_state);
const Operator* op = jsgraph_->common()->FrameState(
- FrameStateType::JS_FRAME, call_info.bailout_id(),
- call_info.state_combine(), jsfunction);
- frame_state->set_op(op);
-}
-
-
-Node* JSInliner::CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
- Handle<JSFunction> jsfunction,
- Zone* temp_zone) {
- const Operator* op = jsgraph_->common()->FrameState(
- FrameStateType::ARGUMENTS_ADAPTOR, BailoutId(-1),
- OutputFrameStateCombine::Ignore(), jsfunction);
+ BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
const Operator* op0 = jsgraph_->common()->StateValues(0);
Node* node0 = jsgraph_->graph()->NewNode(op0);
- NodeVector params(temp_zone);
- params.push_back(call->receiver());
- for (size_t argument = 0; argument != call->formal_arguments(); ++argument) {
- params.push_back(call->formal_argument(argument));
+ NodeVector params(local_zone_);
+ for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
+ params.push_back(node->InputAt(1 + parameter));
}
const Operator* op_param =
jsgraph_->common()->StateValues(static_cast<int>(params.size()));
@@ -339,151 +260,261 @@
op_param, static_cast<int>(params.size()), &params.front());
return jsgraph_->graph()->NewNode(op, params_node, node0, node0,
jsgraph_->UndefinedConstant(),
- call->frame_state());
+ node->InputAt(0), outer_frame_state);
}
-void JSInliner::TryInlineJSCall(Node* call_node) {
- JSCallFunctionAccessor call(call_node);
+namespace {
- HeapObjectMatcher<JSFunction> match(call.jsfunction());
- if (!match.HasValue()) {
- return;
+// TODO(mstarzinger,verwaest): Move this predicate onto SharedFunctionInfo?
+bool NeedsImplicitReceiver(Handle<JSFunction> function, Isolate* isolate) {
+ Code* construct_stub = function->shared()->construct_stub();
+ return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub() &&
+ construct_stub != *isolate->builtins()->ConstructedNonConstructable();
+}
+
+} // namespace
+
+
+Reduction JSInliner::Reduce(Node* node) {
+ if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+
+  // This reducer can handle both normal function calls and constructor calls
+  // whenever the target is a constant function object, as follows:
+ // - JSCallFunction(target:constant, receiver, args...)
+ // - JSCallConstruct(target:constant, args..., new.target)
+ HeapObjectMatcher match(node->InputAt(0));
+ if (!match.HasValue() || !match.Value()->IsJSFunction()) return NoChange();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+
+ return ReduceJSCall(node, function);
+}
+
+
+Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
+ DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+ JSCallAccessor call(node);
+
+ // Function must be inlineable.
+ if (!function->shared()->IsInlineable()) {
+ TRACE("Not inlining %s into %s because callee is not inlineable\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
}
- Handle<JSFunction> function = match.Value().handle();
+ // Constructor must be constructable.
+ if (node->opcode() == IrOpcode::kJSCallConstruct &&
+ !function->IsConstructor()) {
+ TRACE("Not inlining %s into %s because constructor is not constructable.\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
- if (function->shared()->native()) {
- if (FLAG_trace_turbo_inlining) {
- SmartArrayPointer<char> name =
- function->shared()->DebugName()->ToCString();
- PrintF("Not Inlining %s into %s because inlinee is native\n", name.get(),
- info_->shared_info()->DebugName()->ToCString().get());
+ // Class constructors are callable, but [[Call]] will raise an exception.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
+ if (node->opcode() == IrOpcode::kJSCallFunction &&
+ IsClassConstructor(function->shared()->kind())) {
+ TRACE("Not inlining %s into %s because callee is a class constructor.\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
+  // Function must not contain break points.
+ if (function->shared()->HasDebugInfo()) {
+ TRACE("Not inlining %s into %s because callee may contain break points\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
+ // Disallow cross native-context inlining for now. This means that all parts
+ // of the resulting code will operate on the same global object.
+ // This also prevents cross context leaks for asm.js code, where we could
+ // inline functions from a different context and hold on to that context (and
+ // closure) from the code object.
+ // TODO(turbofan): We might want to revisit this restriction later when we
+ // have a need for this, and we know how to model different native contexts
+ // in the same graph in a compositional way.
+ if (function->context()->native_context() !=
+ info_->context()->native_context()) {
+ TRACE("Not inlining %s into %s because of different native contexts\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
+ // TODO(turbofan): TranslatedState::GetAdaptedArguments() currently relies on
+ // not inlining recursive functions. We might want to relax that at some
+ // point.
+ for (Node* frame_state = call.frame_state_after();
+ frame_state->opcode() == IrOpcode::kFrameState;
+ frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
+ FrameStateInfo const& info = OpParameter<FrameStateInfo>(frame_state);
+ Handle<SharedFunctionInfo> shared_info;
+ if (info.shared_info().ToHandle(&shared_info) &&
+ *shared_info == function->shared()) {
+ TRACE("Not inlining %s into %s because call is recursive\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
}
- return;
}
- CompilationInfoWithZone info(function);
- // TODO(wingo): ParseAndAnalyze can fail due to stack overflow.
- CHECK(Compiler::ParseAndAnalyze(&info));
- CHECK(Compiler::EnsureDeoptimizationSupport(&info));
+ // TODO(turbofan): Inlining into a try-block is not yet supported.
+ if (NodeProperties::IsExceptionalCall(node)) {
+ TRACE("Not inlining %s into %s because of surrounding try-block\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
- if (info.scope()->arguments() != NULL && info.strict_mode() != STRICT) {
- // For now do not inline functions that use their arguments array.
- SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
- if (FLAG_trace_turbo_inlining) {
- PrintF(
- "Not Inlining %s into %s because inlinee uses arguments "
- "array\n",
- name.get(), info_->shared_info()->DebugName()->ToCString().get());
+ Zone zone;
+ ParseInfo parse_info(&zone, function);
+ CompilationInfo info(&parse_info);
+ if (info_->is_deoptimization_enabled()) {
+ info.MarkAsDeoptimizationEnabled();
+ }
+
+ if (!Compiler::ParseAndAnalyze(info.parse_info())) {
+ TRACE("Not inlining %s into %s because parsing failed\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ if (info_->isolate()->has_pending_exception()) {
+ info_->isolate()->clear_pending_exception();
}
- return;
+ return NoChange();
}
- if (FLAG_trace_turbo_inlining) {
- SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
- PrintF("Inlining %s into %s\n", name.get(),
- info_->shared_info()->DebugName()->ToCString().get());
+  // In strong mode, a call with too few arguments must throw a TypeError,
+  // so we must not inline such a call.
+ int parameter_count = info.literal()->parameter_count();
+ if (is_strong(info.language_mode()) &&
+ call.formal_arguments() < parameter_count) {
+ TRACE("Not inlining %s into %s because too few arguments for strong mode\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
}
- Graph graph(info.zone());
- JSGraph jsgraph(&graph, jsgraph_->common(), jsgraph_->javascript(),
+ if (!Compiler::EnsureDeoptimizationSupport(&info)) {
+ TRACE("Not inlining %s into %s because deoptimization support failed\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+ // Remember that we inlined this function. This needs to be called right
+ // after we ensure deoptimization support so that the code flusher
+ // does not remove the code with the deoptimization support.
+ info_->AddInlinedFunction(info.shared_info());
+
+ // ----------------------------------------------------------------
+ // After this point, we've made a decision to inline this function.
+  // We shall not bail out from inlining if we got here.
+
+ TRACE("Inlining %s into %s\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+
+ // TODO(mstarzinger): We could use the temporary zone for the graph because
+ // nodes are copied. This however leads to Zone-Types being allocated in the
+ // wrong zone and makes the engine explode at high speeds. Explosion bad!
+ Graph graph(jsgraph_->zone());
+ JSGraph jsgraph(info.isolate(), &graph, jsgraph_->common(),
+ jsgraph_->javascript(), jsgraph_->simplified(),
jsgraph_->machine());
-
AstGraphBuilder graph_builder(local_zone_, &info, &jsgraph);
- graph_builder.CreateGraph();
- Inlinee::UnifyReturn(&jsgraph);
+ graph_builder.CreateGraph(false);
- CopyVisitor visitor(&graph, jsgraph_->graph(), info.zone());
+ CopyVisitor visitor(&graph, jsgraph_->graph(), &zone);
visitor.CopyGraph();
- Inlinee inlinee(visitor.GetCopy(graph.start()), visitor.GetCopy(graph.end()));
+ Node* start = visitor.GetCopy(graph.start());
+ Node* end = visitor.GetCopy(graph.end());
+ Node* frame_state = call.frame_state_after();
+ Node* new_target = jsgraph_->UndefinedConstant();
- if (FLAG_turbo_deoptimization) {
- Node* outer_frame_state = call.frame_state();
- // Insert argument adaptor frame if required.
- if (call.formal_arguments() != inlinee.formal_parameters()) {
- outer_frame_state =
- CreateArgumentsAdaptorFrameState(&call, function, info.zone());
- }
-
- for (NodeVectorConstIter it = visitor.copies().begin();
- it != visitor.copies().end(); ++it) {
- Node* node = *it;
- if (node != NULL && node->opcode() == IrOpcode::kFrameState) {
- AddClosureToFrameState(node, function);
- NodeProperties::ReplaceFrameStateInput(node, outer_frame_state);
- }
- }
+ // Insert nodes around the call that model the behavior required for a
+ // constructor dispatch (allocate implicit receiver and check return value).
+ // This models the behavior usually accomplished by our {JSConstructStub}.
+ // Note that the context has to be the callers context (input to call node).
+ Node* receiver = jsgraph_->UndefinedConstant(); // Implicit receiver.
+ if (node->opcode() == IrOpcode::kJSCallConstruct &&
+ NeedsImplicitReceiver(function, info_->isolate())) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* create = jsgraph_->graph()->NewNode(
+ jsgraph_->javascript()->Create(), call.target(), call.new_target(),
+ context, call.frame_state_before(), effect);
+ NodeProperties::ReplaceEffectInput(node, create);
+ // Insert a check of the return value to determine whether the return value
+ // or the implicit receiver should be selected as a result of the call.
+ Node* check = jsgraph_->graph()->NewNode(
+ jsgraph_->javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1),
+ node, context, node, start);
+ Node* select = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->Select(MachineRepresentation::kTagged), check, node,
+ create);
+ NodeProperties::ReplaceUses(node, select, check, node, node);
+ NodeProperties::ReplaceValueInput(select, node, 1);
+ NodeProperties::ReplaceValueInput(check, node, 0);
+ NodeProperties::ReplaceEffectInput(check, node);
+ receiver = create; // The implicit receiver.
}
- inlinee.InlineAtCall(jsgraph_, call_node);
+ // Swizzle the inputs of the {JSCallConstruct} node to look like inputs to a
+ // normal {JSCallFunction} node so that the rest of the inlining machinery
+ // behaves as if we were dealing with a regular function invocation.
+ if (node->opcode() == IrOpcode::kJSCallConstruct) {
+ new_target = call.new_target(); // Retrieve new target value input.
+ node->RemoveInput(call.formal_arguments() + 1); // Drop new target.
+ node->InsertInput(jsgraph_->graph()->zone(), 1, receiver);
+ // Insert a construct stub frame into the chain of frame states. This will
+ // reconstruct the proper frame when deoptimizing within the constructor.
+ frame_state = CreateArtificialFrameState(
+ node, frame_state, call.formal_arguments(),
+ FrameStateType::kConstructStub, info.shared_info());
+ }
+
+ // The inlinee specializes to the context from the JSFunction object.
+ // TODO(turbofan): We might want to load the context from the JSFunction at
+ // runtime in case we only know the SharedFunctionInfo once we have dynamic
+ // type feedback in the compiler.
+ Node* context = jsgraph_->Constant(handle(function->context()));
+
+  // Insert a JSConvertReceiver node for sloppy callees. Note that the context
+  // passed into this node has to be the callee's context (loaded above). Note
+  // that the frame state passed to the JSConvertReceiver must be the frame
+  // state _before_ the call; it is not necessary to fiddle with the receiver
+  // in that frame state though, as the conversion of the receiver can be
+  // repeated any number of times, because it is not observable.
+ if (node->opcode() == IrOpcode::kJSCallFunction &&
+ is_sloppy(info.language_mode()) && !function->shared()->native()) {
+ const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* convert = jsgraph_->graph()->NewNode(
+ jsgraph_->javascript()->ConvertReceiver(p.convert_mode()),
+ call.receiver(), context, call.frame_state_before(), effect, start);
+ NodeProperties::ReplaceValueInput(node, convert, 1);
+ NodeProperties::ReplaceEffectInput(node, convert);
+ }
+
+  // Insert an argument adaptor frame if required. The callee's formal
+  // parameter count (i.e. the value outputs of the start node minus target,
+  // receiver, new target, argument count and context) has to match the number
+  // of arguments passed to the call.
+ DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
+ if (call.formal_arguments() != parameter_count) {
+ frame_state = CreateArtificialFrameState(
+ node, frame_state, call.formal_arguments(),
+ FrameStateType::kArgumentsAdaptor, info.shared_info());
+ }
+
+ return InlineCall(node, new_target, context, frame_state, start, end);
}
-
-class JSCallRuntimeAccessor {
- public:
- explicit JSCallRuntimeAccessor(Node* call) : call_(call) {
- DCHECK_EQ(IrOpcode::kJSCallRuntime, call->opcode());
- }
-
- Node* formal_argument(size_t index) {
- DCHECK(index < formal_arguments());
- return call_->InputAt(static_cast<int>(index));
- }
-
- size_t formal_arguments() {
- size_t value_inputs = call_->op()->ValueInputCount();
- return value_inputs;
- }
-
- Node* frame_state() const {
- return NodeProperties::GetFrameStateInput(call_);
- }
- Node* context() const { return NodeProperties::GetContextInput(call_); }
- Node* control() const { return NodeProperties::GetControlInput(call_); }
- Node* effect() const { return NodeProperties::GetEffectInput(call_); }
-
- const Runtime::Function* function() const {
- return Runtime::FunctionForId(CallRuntimeParametersOf(call_->op()).id());
- }
-
- NodeVector inputs(Zone* zone) const {
- NodeVector inputs(zone);
- for (Node* const node : call_->inputs()) {
- inputs.push_back(node);
- }
- return inputs;
- }
-
- private:
- Node* call_;
-};
-
-
-void JSInliner::TryInlineRuntimeCall(Node* call_node) {
- JSCallRuntimeAccessor call(call_node);
- const Runtime::Function* f = call.function();
-
- if (f->intrinsic_type != Runtime::IntrinsicType::INLINE) {
- return;
- }
-
- JSIntrinsicBuilder intrinsic_builder(jsgraph_);
-
- ResultAndEffect r = intrinsic_builder.BuildGraphFor(
- f->function_id, call.inputs(jsgraph_->zone()));
-
- if (r.first != NULL) {
- if (FLAG_trace_turbo_inlining) {
- PrintF("Inlining %s into %s\n", f->name,
- info_->shared_info()->DebugName()->ToCString().get());
- }
- NodeProperties::ReplaceWithValue(call_node, r.first, r.second);
- call_node->RemoveAllInputs();
- DCHECK_EQ(0, call_node->UseCount());
- }
-}
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
index eef29d6..99eff96 100644
--- a/src/compiler/js-inlining.h
+++ b/src/compiler/js-inlining.h
@@ -6,37 +6,51 @@
#define V8_COMPILER_JS_INLINING_H_
#include "src/compiler/js-graph.h"
-#include "src/v8.h"
+#include "src/compiler/graph-reducer.h"
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class CompilationInfo;
+
namespace compiler {
-class JSCallFunctionAccessor;
-
-class JSInliner {
+// The JSInliner provides the core graph inlining machinery. Note that this
+// class only deals with the mechanics of how to inline one graph into
+// another; heuristics that decide what and how much to inline are beyond its
+// scope.
+class JSInliner final : public AdvancedReducer {
public:
- JSInliner(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph)
- : local_zone_(local_zone), info_(info), jsgraph_(jsgraph) {}
+ JSInliner(Editor* editor, Zone* local_zone, CompilationInfo* info,
+ JSGraph* jsgraph)
+ : AdvancedReducer(editor),
+ local_zone_(local_zone),
+ info_(info),
+ jsgraph_(jsgraph) {}
- void Inline();
- void TryInlineJSCall(Node* node);
- void TryInlineRuntimeCall(Node* node);
+ // Reducer interface, eagerly inlines everything.
+ Reduction Reduce(Node* node) final;
+
+ // Can be used by inlining heuristics or by testing code directly, without
+ // using the above generic reducer interface of the inlining machinery.
+ Reduction ReduceJSCall(Node* node, Handle<JSFunction> function);
private:
- friend class InlinerVisitor;
Zone* local_zone_;
CompilationInfo* info_;
JSGraph* jsgraph_;
- Node* CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
- Handle<JSFunction> jsfunction,
- Zone* temp_zone);
- void AddClosureToFrameState(Node* frame_state, Handle<JSFunction> jsfunction);
- static void UnifyReturn(Graph* graph);
+ Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
+ int parameter_count,
+ FrameStateType frame_state_type,
+ Handle<SharedFunctionInfo> shared);
+
+ Reduction InlineCall(Node* call, Node* new_target, Node* context,
+ Node* frame_state, Node* start, Node* end);
};
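+
+// Usage sketch (an assumption for illustration, not part of this change):
+// the inliner is registered with a GraphReducer like any other reducer.
+//
+//   GraphReducer graph_reducer(zone, graph);
+//   JSInliner inliner(&graph_reducer, local_zone, info, jsgraph);
+//   graph_reducer.AddReducer(&inliner);
+//   graph_reducer.ReduceGraph();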
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_JS_INLINING_H_
diff --git a/src/compiler/js-intrinsic-builder.cc b/src/compiler/js-intrinsic-builder.cc
deleted file mode 100644
index 80b6968..0000000
--- a/src/compiler/js-intrinsic-builder.cc
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/access-builder.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/diamond.h"
-#include "src/compiler/js-intrinsic-builder.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/simplified-operator.h"
-
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-ResultAndEffect JSIntrinsicBuilder::BuildGraphFor(Runtime::FunctionId id,
- const NodeVector& arguments) {
- switch (id) {
- case Runtime::kInlineIsSmi:
- return BuildGraphFor_IsSmi(arguments);
- case Runtime::kInlineIsNonNegativeSmi:
- return BuildGraphFor_IsNonNegativeSmi(arguments);
- case Runtime::kInlineIsArray:
- return BuildMapCheck(arguments[0], arguments[2], JS_ARRAY_TYPE);
- case Runtime::kInlineIsRegExp:
- return BuildMapCheck(arguments[0], arguments[2], JS_REGEXP_TYPE);
- case Runtime::kInlineIsFunction:
- return BuildMapCheck(arguments[0], arguments[2], JS_FUNCTION_TYPE);
- case Runtime::kInlineValueOf:
- return BuildGraphFor_ValueOf(arguments);
- default:
- break;
- }
- return ResultAndEffect();
-}
-
-ResultAndEffect JSIntrinsicBuilder::BuildGraphFor_IsSmi(
- const NodeVector& arguments) {
- Node* object = arguments[0];
- SimplifiedOperatorBuilder simplified(jsgraph_->zone());
- Node* condition = graph()->NewNode(simplified.ObjectIsSmi(), object);
-
- return ResultAndEffect(condition, arguments[2]);
-}
-
-
-ResultAndEffect JSIntrinsicBuilder::BuildGraphFor_IsNonNegativeSmi(
- const NodeVector& arguments) {
- Node* object = arguments[0];
- SimplifiedOperatorBuilder simplified(jsgraph_->zone());
- Node* condition =
- graph()->NewNode(simplified.ObjectIsNonNegativeSmi(), object);
-
- return ResultAndEffect(condition, arguments[2]);
-}
-
-
-/*
- * if (_isSmi(object)) {
- * return false
- * } else {
- * return %_GetMapInstanceType(object) == map_type
- * }
- */
-ResultAndEffect JSIntrinsicBuilder::BuildMapCheck(Node* object, Node* effect,
- InstanceType map_type) {
- SimplifiedOperatorBuilder simplified(jsgraph_->zone());
-
- Node* is_smi = graph()->NewNode(simplified.ObjectIsSmi(), object);
- Diamond d(graph(), common(), is_smi);
-
- Node* map = graph()->NewNode(simplified.LoadField(AccessBuilder::ForMap()),
- object, effect, d.if_false);
-
- Node* instance_type = graph()->NewNode(
- simplified.LoadField(AccessBuilder::ForMapInstanceType()), map, map,
- d.if_false);
-
- Node* has_map_type =
- graph()->NewNode(jsgraph_->machine()->Word32Equal(), instance_type,
- jsgraph_->Int32Constant(map_type));
-
- Node* phi = d.Phi(static_cast<MachineType>(kTypeBool | kRepTagged),
- jsgraph_->FalseConstant(), has_map_type);
-
- Node* ephi = d.EffectPhi(effect, instance_type);
-
- return ResultAndEffect(phi, ephi);
-}
-
-
-/*
- * if (%_isSmi(object)) {
- * return object;
- * } else if (%_GetMapInstanceType(object) == JS_VALUE_TYPE) {
- * return %_LoadValueField(object);
- * } else {
- * return object;
- * }
- */
-ResultAndEffect JSIntrinsicBuilder::BuildGraphFor_ValueOf(
- const NodeVector& arguments) {
- Node* object = arguments[0];
- Node* effect = arguments[2];
- SimplifiedOperatorBuilder simplified(jsgraph_->zone());
-
- Node* is_smi = graph()->NewNode(simplified.ObjectIsSmi(), object);
-
- Diamond if_is_smi(graph(), common(), is_smi);
-
- Node* map = graph()->NewNode(simplified.LoadField(AccessBuilder::ForMap()),
- object, effect, if_is_smi.if_false);
-
- Node* instance_type = graph()->NewNode(
- simplified.LoadField(AccessBuilder::ForMapInstanceType()), map, map,
- if_is_smi.if_false);
-
- Node* is_value =
- graph()->NewNode(jsgraph_->machine()->Word32Equal(), instance_type,
- jsgraph_->Constant(JS_VALUE_TYPE));
-
- Diamond if_is_value(graph(), common(), is_value);
- if_is_value.Nest(if_is_smi, false);
-
- Node* value =
- graph()->NewNode(simplified.LoadField(AccessBuilder::ForValue()), object,
- instance_type, if_is_value.if_true);
-
- Node* phi_is_value = if_is_value.Phi(kTypeAny, value, object);
-
- Node* phi = if_is_smi.Phi(kTypeAny, object, phi_is_value);
-
- Node* ephi = if_is_smi.EffectPhi(effect, instance_type);
-
- return ResultAndEffect(phi, ephi);
-}
-}
-}
-} // namespace v8::internal::compiler
diff --git a/src/compiler/js-intrinsic-builder.h b/src/compiler/js-intrinsic-builder.h
deleted file mode 100644
index 9336be6..0000000
--- a/src/compiler/js-intrinsic-builder.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_INTRINSIC_BUILDER_H_
-#define V8_COMPILER_JS_INTRINSIC_BUILDER_H_
-
-#include "src/compiler/js-graph.h"
-#include "src/v8.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-typedef std::pair<Node*, Node*> ResultAndEffect;
-
-class JSIntrinsicBuilder {
- public:
- explicit JSIntrinsicBuilder(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
-
- ResultAndEffect BuildGraphFor(Runtime::FunctionId id,
- const NodeVector& arguments);
-
- private:
- ResultAndEffect BuildMapCheck(Node* object, Node* effect,
- InstanceType map_type);
- ResultAndEffect BuildGraphFor_IsSmi(const NodeVector& arguments);
- ResultAndEffect BuildGraphFor_IsNonNegativeSmi(const NodeVector& arguments);
- ResultAndEffect BuildGraphFor_ValueOf(const NodeVector& arguments);
-
-
- Graph* graph() const { return jsgraph_->graph(); }
- CommonOperatorBuilder* common() const { return jsgraph_->common(); }
- JSGraph* jsgraph_;
-};
-}
-}
-} // namespace v8::internal::compiler
-
-#endif // V8_COMPILER_JS_INTRINSIC_BUILDER_H_
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
new file mode 100644
index 0000000..ca5cb93
--- /dev/null
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -0,0 +1,702 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-intrinsic-lowering.h"
+
+#include <stack>
+
+#include "src/code-factory.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
+#include "src/type-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph,
+ DeoptimizationMode mode)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ mode_(mode),
+ type_cache_(TypeCache::Get()) {}
+
+
+Reduction JSIntrinsicLowering::Reduce(Node* node) {
+ if (node->opcode() != IrOpcode::kJSCallRuntime) return NoChange();
+ const Runtime::Function* const f =
+ Runtime::FunctionForId(CallRuntimeParametersOf(node->op()).id());
+ if (f->intrinsic_type != Runtime::IntrinsicType::INLINE) return NoChange();
+ switch (f->function_id) {
+ case Runtime::kInlineConstructDouble:
+ return ReduceConstructDouble(node);
+ case Runtime::kInlineCreateIterResultObject:
+ return ReduceCreateIterResultObject(node);
+ case Runtime::kInlineDeoptimizeNow:
+ return ReduceDeoptimizeNow(node);
+ case Runtime::kInlineDoubleHi:
+ return ReduceDoubleHi(node);
+ case Runtime::kInlineDoubleLo:
+ return ReduceDoubleLo(node);
+ case Runtime::kInlineIncrementStatsCounter:
+ return ReduceIncrementStatsCounter(node);
+ case Runtime::kInlineIsArray:
+ return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
+ case Runtime::kInlineIsDate:
+ return ReduceIsInstanceType(node, JS_DATE_TYPE);
+ case Runtime::kInlineIsTypedArray:
+ return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
+ case Runtime::kInlineIsFunction:
+ return ReduceIsFunction(node);
+ case Runtime::kInlineIsRegExp:
+ return ReduceIsInstanceType(node, JS_REGEXP_TYPE);
+ case Runtime::kInlineIsJSReceiver:
+ return ReduceIsJSReceiver(node);
+ case Runtime::kInlineIsSmi:
+ return ReduceIsSmi(node);
+ case Runtime::kInlineJSValueGetValue:
+ return ReduceJSValueGetValue(node);
+ case Runtime::kInlineMathClz32:
+ return ReduceMathClz32(node);
+ case Runtime::kInlineMathFloor:
+ return ReduceMathFloor(node);
+ case Runtime::kInlineMathSqrt:
+ return ReduceMathSqrt(node);
+ case Runtime::kInlineValueOf:
+ return ReduceValueOf(node);
+ case Runtime::kInlineIsMinusZero:
+ return ReduceIsMinusZero(node);
+ case Runtime::kInlineFixedArrayGet:
+ return ReduceFixedArrayGet(node);
+ case Runtime::kInlineFixedArraySet:
+ return ReduceFixedArraySet(node);
+ case Runtime::kInlineRegExpConstructResult:
+ return ReduceRegExpConstructResult(node);
+ case Runtime::kInlineRegExpExec:
+ return ReduceRegExpExec(node);
+ case Runtime::kInlineRegExpFlags:
+ return ReduceRegExpFlags(node);
+ case Runtime::kInlineRegExpSource:
+ return ReduceRegExpSource(node);
+ case Runtime::kInlineSubString:
+ return ReduceSubString(node);
+ case Runtime::kInlineToInteger:
+ return ReduceToInteger(node);
+ case Runtime::kInlineToLength:
+ return ReduceToLength(node);
+ case Runtime::kInlineToName:
+ return ReduceToName(node);
+ case Runtime::kInlineToNumber:
+ return ReduceToNumber(node);
+ case Runtime::kInlineToObject:
+ return ReduceToObject(node);
+ case Runtime::kInlineToPrimitive:
+ return ReduceToPrimitive(node);
+ case Runtime::kInlineToString:
+ return ReduceToString(node);
+ case Runtime::kInlineCall:
+ return ReduceCall(node);
+ case Runtime::kInlineTailCall:
+ return ReduceTailCall(node);
+ case Runtime::kInlineGetSuperConstructor:
+ return ReduceGetSuperConstructor(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction JSIntrinsicLowering::ReduceCreateIterResultObject(Node* node) {
+ Node* const value = NodeProperties::GetValueInput(node, 0);
+ Node* const done = NodeProperties::GetValueInput(node, 1);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ return Change(node, javascript()->CreateIterResultObject(), value, done,
+ context, effect);
+}
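+
+// For illustration: the intrinsic lowered above builds the standard iterator
+// result object, i.e.
+//
+//   %_CreateIterResultObject(value, done)  =>  { value: value, done: done }
+//
+// so the runtime call can simply become the JSCreateIterResultObject operator
+// with the same value, done, context and effect inputs.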
+
+
+Reduction JSIntrinsicLowering::ReduceConstructDouble(Node* node) {
+ Node* high = NodeProperties::GetValueInput(node, 0);
+ Node* low = NodeProperties::GetValueInput(node, 1);
+ Node* value =
+ graph()->NewNode(machine()->Float64InsertHighWord32(),
+ graph()->NewNode(machine()->Float64InsertLowWord32(),
+ jsgraph()->Constant(0), low),
+ high);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
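+
+// A worked example (illustrative): %_ConstructDouble(hi, lo) assembles an
+// IEEE-754 double from its raw 32-bit halves, so
+//
+//   %_ConstructDouble(0x40090000, 0x00000000)  // bits 0x4009000000000000
+//
+// yields 3.125 (sign 0, biased exponent 0x400, fraction 0x9000000000000).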
+
+
+Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
+ if (mode() != kDeoptimizationEnabled) return NoChange();
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+
+ // TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, control);
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Dead());
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceDoubleHi(Node* node) {
+ return Change(node, machine()->Float64ExtractHighWord32());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceDoubleLo(Node* node) {
+ return Change(node, machine()->Float64ExtractLowWord32());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceIncrementStatsCounter(Node* node) {
+ if (!FLAG_native_code_counters) return ChangeToUndefined(node);
+ HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
+ if (!m.HasValue() || !m.Value()->IsString()) {
+ return ChangeToUndefined(node);
+ }
+ base::SmartArrayPointer<char> name =
+ Handle<String>::cast(m.Value())->ToCString();
+ StatsCounter counter(jsgraph()->isolate(), name.get());
+ if (!counter.Enabled()) return ChangeToUndefined(node);
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ FieldAccess access = AccessBuilder::ForStatsCounter();
+ Node* cnt = jsgraph()->ExternalConstant(ExternalReference(&counter));
+ Node* load =
+ graph()->NewNode(simplified()->LoadField(access), cnt, effect, control);
+ Node* inc =
+ graph()->NewNode(machine()->Int32Add(), load, jsgraph()->OneConstant());
+ Node* store = graph()->NewNode(simplified()->StoreField(access), cnt, inc,
+ load, control);
+ return ChangeToUndefined(node, store);
+}
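+
+// In effect, the lowering above turns %_IncrementStatsCounter("name") with an
+// enabled native-code counter into a plain unsynchronized load/add/store on
+// the counter cell (a sketch of the generated code, not literal source):
+//
+//   int32_t* cell = ...;  // ExternalConstant pointing at the StatsCounter
+//   *cell = *cell + 1;
+//
+// and replaces the intrinsic's value uses with undefined.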
+
+
+Reduction JSIntrinsicLowering::ReduceIsInstanceType(
+ Node* node, InstanceType instance_type) {
+ // if (%_IsSmi(value)) {
+ // return false;
+ // } else {
+ // return %_GetInstanceType(%_GetMap(value)) == instance_type;
+ // }
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->FalseConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), value,
+ effect, if_false),
+ effect, if_false);
+ Node* vfalse = graph()->NewNode(machine()->Word32Equal(), efalse,
+ jsgraph()->Int32Constant(instance_type));
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+
+ // Replace all effect uses of {node} with the {ephi}.
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
+ ReplaceWithValue(node, node, ephi);
+
+ // Turn the {node} into a Phi.
+ return Change(node, common()->Phi(MachineRepresentation::kTagged, 2), vtrue,
+ vfalse, merge);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceIsFunction(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (value_type->Is(Type::Function())) {
+ value = jsgraph()->TrueConstant();
+ } else {
+ // if (%_IsSmi(value)) {
+ // return false;
+ // } else {
+ // return FIRST_FUNCTION_TYPE <= %_GetInstanceType(%_GetMap(value))
+ // }
+ STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
+
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->FalseConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, effect, if_false),
+ effect, if_false);
+ Node* vfalse =
+ graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Int32Constant(FIRST_FUNCTION_TYPE), efalse);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
+ ReplaceWithValue(node, node, effect, control);
+ return Replace(value);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceIsJSReceiver(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (value_type->Is(Type::Receiver())) {
+ value = jsgraph()->TrueConstant();
+ } else if (!value_type->Maybe(Type::Receiver())) {
+ value = jsgraph()->FalseConstant();
+ } else {
+ // if (%_IsSmi(value)) {
+ // return false;
+ // } else {
+ // return FIRST_JS_RECEIVER_TYPE <= %_GetInstanceType(%_GetMap(value))
+ // }
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->FalseConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, effect, if_false),
+ effect, if_false);
+ Node* vfalse = graph()->NewNode(
+ machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Int32Constant(FIRST_JS_RECEIVER_TYPE), efalse);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
+ ReplaceWithValue(node, node, effect, control);
+ return Replace(value);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
+ return Change(node, simplified()->ObjectIsSmi());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceJSValueGetValue(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ return Change(node, simplified()->LoadField(AccessBuilder::ForValue()), value,
+ effect, control);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceMathClz32(Node* node) {
+ return Change(node, machine()->Word32Clz());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceMathFloor(Node* node) {
+ if (!machine()->Float64RoundDown().IsSupported()) return NoChange();
+ return Change(node, machine()->Float64RoundDown().op());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceMathSqrt(Node* node) {
+ return Change(node, machine()->Float64Sqrt());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
+ // if (%_IsSmi(value)) {
+ // return value;
+ // } else if (%_GetInstanceType(%_GetMap(value)) == JS_VALUE_TYPE) {
+ // return %_GetValue(value);
+ // } else {
+ // return value;
+ // }
+ const Operator* const merge_op = common()->Merge(2);
+ const Operator* const ephi_op = common()->EffectPhi(2);
+ const Operator* const phi_op =
+ common()->Phi(MachineRepresentation::kTagged, 2);
+
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = value;
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0;
+ Node* vfalse0;
+ {
+ Node* check1 = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, effect, if_false0),
+ effect, if_false0),
+ jsgraph()->Int32Constant(JS_VALUE_TYPE));
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForValue()),
+ value, effect, if_true1);
+ Node* vtrue1 = etrue1;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = effect;
+ Node* vfalse1 = value;
+
+ Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
+ efalse0 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
+ vfalse0 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
+ }
+
+ Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
+
+ // Replace all effect uses of {node} with the {ephi0}.
+ Node* ephi0 = graph()->NewNode(ephi_op, etrue0, efalse0, merge0);
+ ReplaceWithValue(node, node, ephi0);
+
+ // Turn the {node} into a Phi.
+ return Change(node, phi_op, vtrue0, vfalse0, merge0);
+}
+
+
+Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
+ // Replace all effect uses of {node} with the effect dependency.
+ RelaxEffectsAndControls(node);
+ // Remove the inputs corresponding to context, effect and control.
+ NodeProperties::RemoveNonValueInputs(node);
+ // Finally update the operator to the new one.
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceIsMinusZero(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ Node* double_lo =
+ graph()->NewNode(machine()->Float64ExtractLowWord32(), value);
+ Node* check1 = graph()->NewNode(machine()->Word32Equal(), double_lo,
+ jsgraph()->ZeroConstant());
+
+ Node* double_hi =
+ graph()->NewNode(machine()->Float64ExtractHighWord32(), value);
+ Node* check2 = graph()->NewNode(
+ machine()->Word32Equal(), double_hi,
+ jsgraph()->Int32Constant(static_cast<int32_t>(0x80000000)));
+
+ ReplaceWithValue(node, node, effect);
+
+ Node* and_result = graph()->NewNode(machine()->Word32And(), check1, check2);
+
+ return Change(node, machine()->Word32Equal(), and_result,
+ jsgraph()->Int32Constant(1));
+}
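+
+// For reference (illustrative): -0.0 has the IEEE-754 bit pattern
+// 0x8000000000000000, so the check built above is exactly
+//
+//   lo == 0x00000000 && hi == 0x80000000
+//
+// which distinguishes -0.0 from +0.0 (all-zero bits) and all other doubles.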
+
+
+Reduction JSIntrinsicLowering::ReduceFixedArrayGet(Node* node) {
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ return Change(
+ node, simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ base, index, effect, control);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceFixedArraySet(Node* node) {
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+  Node* store = graph()->NewNode(
+      simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()), base,
+      index, value, effect, control);
+ ReplaceWithValue(node, value, store);
+ return Changed(store);
+}
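+
+// Note (for orientation): %_FixedArrayGet and %_FixedArraySet are lowered to
+// raw LoadElement/StoreElement on the FixedArray backing store; no map or
+// bounds checks are inserted here, so these intrinsics are only safe where
+// the index is already known to be valid.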
+
+
+Reduction JSIntrinsicLowering::ReduceRegExpConstructResult(Node* node) {
+ // TODO(bmeurer): Introduce JSCreateRegExpResult?
+ return Change(node, CodeFactory::RegExpConstructResult(isolate()), 0);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceRegExpExec(Node* node) {
+ return Change(node, CodeFactory::RegExpExec(isolate()), 4);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceRegExpFlags(Node* node) {
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op =
+ simplified()->LoadField(AccessBuilder::ForJSRegExpFlags());
+ return Change(node, op, receiver, effect, control);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceRegExpSource(Node* node) {
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op =
+ simplified()->LoadField(AccessBuilder::ForJSRegExpSource());
+ return Change(node, op, receiver, effect, control);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceSubString(Node* node) {
+ return Change(node, CodeFactory::SubString(isolate()), 3);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToInteger(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToName(Node* node) {
+ NodeProperties::ChangeOp(node, javascript()->ToName());
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToNumber(Node* node) {
+ NodeProperties::ChangeOp(node, javascript()->ToNumber());
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToLength(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
+ if (value_type->Max() <= 0.0) {
+ value = jsgraph()->ZeroConstant();
+ } else if (value_type->Min() >= kMaxSafeInteger) {
+ value = jsgraph()->Constant(kMaxSafeInteger);
+ } else {
+ if (value_type->Min() <= 0.0) {
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), value,
+ jsgraph()->ZeroConstant()),
+ jsgraph()->ZeroConstant(), value);
+ value_type = Type::Range(0.0, value_type->Max(), graph()->zone());
+ NodeProperties::SetType(value, value_type);
+ }
+ if (value_type->Max() > kMaxSafeInteger) {
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->Constant(kMaxSafeInteger), value),
+ jsgraph()->Constant(kMaxSafeInteger), value);
+ value_type =
+ Type::Range(value_type->Min(), kMaxSafeInteger, graph()->zone());
+ NodeProperties::SetType(value, value_type);
+ }
+ }
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ return Change(node, CodeFactory::ToLength(isolate()), 0);
+}
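+
+// Worked examples of the ES6 ToLength clamping above (illustrative):
+// ToLength(-5) == 0, ToLength(3) == 3, and ToLength(2^53) is clamped to
+// kMaxSafeInteger == 2^53 - 1 == 9007199254740991.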
+
+
+Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
+ NodeProperties::ChangeOp(node, javascript()->ToObject());
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToPrimitive(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ if (value_type->Is(Type::Primitive())) {
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
+ NodeProperties::ChangeOp(node, javascript()->ToString());
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
+ size_t const arity = CallRuntimeParametersOf(node->op()).arity();
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
+ ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow));
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceTailCall(Node* node) {
+ size_t const arity = CallRuntimeParametersOf(node->op()).arity();
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
+ ConvertReceiverMode::kAny,
+ TailCallMode::kAllow));
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
+ Node* active_function = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* active_function_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ active_function, effect, control);
+ return Change(node, simplified()->LoadField(AccessBuilder::ForMapPrototype()),
+ active_function_map, effect, control);
+}
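+
+// For illustration: the super constructor is the prototype of the active
+// function's map, e.g. with
+//
+//   class A {}
+//   class B extends A { constructor() { super(); } }
+//
+// the [[Prototype]] of B is A, so %_GetSuperConstructor for B's constructor
+// reduces to the two field loads above (map, then the map's prototype).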
+
+
+Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
+ Node* b) {
+ RelaxControls(node);
+ node->ReplaceInput(0, a);
+ node->ReplaceInput(1, b);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
+ Node* b, Node* c) {
+ RelaxControls(node);
+ node->ReplaceInput(0, a);
+ node->ReplaceInput(1, b);
+ node->ReplaceInput(2, c);
+ node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
+ Node* b, Node* c, Node* d) {
+ RelaxControls(node);
+ node->ReplaceInput(0, a);
+ node->ReplaceInput(1, b);
+ node->ReplaceInput(2, c);
+ node->ReplaceInput(3, d);
+ node->TrimInputCount(4);
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ChangeToUndefined(Node* node, Node* effect) {
+ ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect);
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::Change(Node* node, Callable const& callable,
+ int stack_parameter_count) {
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), stack_parameter_count,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+}
+
+
+Graph* JSIntrinsicLowering::graph() const { return jsgraph()->graph(); }
+
+
+Isolate* JSIntrinsicLowering::isolate() const { return jsgraph()->isolate(); }
+
+
+CommonOperatorBuilder* JSIntrinsicLowering::common() const {
+ return jsgraph()->common();
+}
+
+JSOperatorBuilder* JSIntrinsicLowering::javascript() const {
+ return jsgraph_->javascript();
+}
+
+
+MachineOperatorBuilder* JSIntrinsicLowering::machine() const {
+ return jsgraph()->machine();
+}
+
+
+SimplifiedOperatorBuilder* JSIntrinsicLowering::simplified() const {
+ return jsgraph()->simplified();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
new file mode 100644
index 0000000..1977a58
--- /dev/null
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -0,0 +1,103 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_INTRINSIC_LOWERING_H_
+#define V8_COMPILER_JS_INTRINSIC_LOWERING_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class Callable;
+class TypeCache;
+
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSOperatorBuilder;
+class JSGraph;
+class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
+
+
+// Lowers certain JS-level runtime calls.
+class JSIntrinsicLowering final : public AdvancedReducer {
+ public:
+ enum DeoptimizationMode { kDeoptimizationEnabled, kDeoptimizationDisabled };
+
+ JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph,
+ DeoptimizationMode mode);
+ ~JSIntrinsicLowering() final {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceConstructDouble(Node* node);
+ Reduction ReduceCreateIterResultObject(Node* node);
+ Reduction ReduceDeoptimizeNow(Node* node);
+ Reduction ReduceDoubleHi(Node* node);
+ Reduction ReduceDoubleLo(Node* node);
+ Reduction ReduceIncrementStatsCounter(Node* node);
+ Reduction ReduceIsMinusZero(Node* node);
+ Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
+ Reduction ReduceIsFunction(Node* node);
+ Reduction ReduceIsJSReceiver(Node* node);
+ Reduction ReduceIsSmi(Node* node);
+ Reduction ReduceJSValueGetValue(Node* node);
+ Reduction ReduceMathClz32(Node* node);
+ Reduction ReduceMathFloor(Node* node);
+ Reduction ReduceMathSqrt(Node* node);
+ Reduction ReduceValueOf(Node* node);
+ Reduction ReduceFixedArrayGet(Node* node);
+ Reduction ReduceFixedArraySet(Node* node);
+ Reduction ReduceRegExpConstructResult(Node* node);
+ Reduction ReduceRegExpExec(Node* node);
+ Reduction ReduceRegExpFlags(Node* node);
+ Reduction ReduceRegExpSource(Node* node);
+ Reduction ReduceSubString(Node* node);
+ Reduction ReduceToInteger(Node* node);
+ Reduction ReduceToLength(Node* node);
+ Reduction ReduceToName(Node* node);
+ Reduction ReduceToNumber(Node* node);
+ Reduction ReduceToObject(Node* node);
+ Reduction ReduceToPrimitive(Node* node);
+ Reduction ReduceToString(Node* node);
+ Reduction ReduceCall(Node* node);
+ Reduction ReduceTailCall(Node* node);
+ Reduction ReduceGetSuperConstructor(Node* node);
+
+ Reduction Change(Node* node, const Operator* op);
+ Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
+ Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
+ Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c,
+ Node* d);
+ Reduction ChangeToUndefined(Node* node, Node* effect = nullptr);
+ Reduction Change(Node* node, Callable const& callable,
+ int stack_parameter_count);
+
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
+ CommonOperatorBuilder* common() const;
+ JSOperatorBuilder* javascript() const;
+ MachineOperatorBuilder* machine() const;
+ SimplifiedOperatorBuilder* simplified() const;
+ DeoptimizationMode mode() const { return mode_; }
+ TypeCache const& type_cache() const { return type_cache_; }
+
+ JSGraph* const jsgraph_;
+ DeoptimizationMode const mode_;
+ TypeCache const& type_cache_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_INTRINSIC_LOWERING_H_
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
new file mode 100644
index 0000000..06cf770
--- /dev/null
+++ b/src/compiler/js-native-context-specialization.cc
@@ -0,0 +1,1033 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-native-context-specialization.h"
+
+#include "src/accessors.h"
+#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/access-info.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/field-index-inl.h"
+#include "src/isolate-inl.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/type-cache.h"
+#include "src/type-feedback-vector.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+JSNativeContextSpecialization::JSNativeContextSpecialization(
+ Editor* editor, JSGraph* jsgraph, Flags flags,
+ MaybeHandle<Context> native_context, CompilationDependencies* dependencies,
+ Zone* zone)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ flags_(flags),
+ native_context_(native_context),
+ dependencies_(dependencies),
+ zone_(zone),
+ type_cache_(TypeCache::Get()) {}
+
+
+Reduction JSNativeContextSpecialization::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSLoadNamed:
+ return ReduceJSLoadNamed(node);
+ case IrOpcode::kJSStoreNamed:
+ return ReduceJSStoreNamed(node);
+ case IrOpcode::kJSLoadProperty:
+ return ReduceJSLoadProperty(node);
+ case IrOpcode::kJSStoreProperty:
+ return ReduceJSStoreProperty(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceNamedAccess(
+ Node* node, Node* value, MapHandleList const& receiver_maps,
+ Handle<Name> name, AccessMode access_mode, LanguageMode language_mode,
+ Node* index) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
+ node->opcode() == IrOpcode::kJSStoreNamed ||
+ node->opcode() == IrOpcode::kJSLoadProperty ||
+ node->opcode() == IrOpcode::kJSStoreProperty);
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // Retrieve the native context from the given {node}.
+ Handle<Context> native_context;
+ if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
+
+ // Compute property access infos for the receiver maps.
+ AccessInfoFactory access_info_factory(dependencies(), native_context,
+ graph()->zone());
+ ZoneVector<PropertyAccessInfo> access_infos(zone());
+ if (!access_info_factory.ComputePropertyAccessInfos(
+ receiver_maps, name, access_mode, &access_infos)) {
+ return NoChange();
+ }
+
+ // Nothing to do if we have no non-deprecated maps.
+ if (access_infos.empty()) return NoChange();
+
+  // The final states for every polymorphic branch. We join them with
+  // Merge+Phi+EffectPhi at the bottom.
+ ZoneVector<Node*> values(zone());
+ ZoneVector<Node*> effects(zone());
+ ZoneVector<Node*> controls(zone());
+
+ // The list of "exiting" controls, which currently go to a single deoptimize.
+ // TODO(bmeurer): Consider using an IC as fallback.
+ Node* const exit_effect = effect;
+ ZoneVector<Node*> exit_controls(zone());
+
+ // Ensure that {index} matches the specified {name} (if {index} is given).
+ if (index != nullptr) {
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Name()),
+ index, jsgraph()->HeapConstant(name));
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ }
+
+ // Ensure that {receiver} is a heap object.
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+ control = graph()->NewNode(common()->IfFalse(), branch);
+ Node* receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+ Node* receiverissmi_effect = effect;
+
+ // Load the {receiver} map. The resulting effect is the dominating effect for
+ // all (polymorphic) branches.
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+
+ // Generate code for the various different property access patterns.
+ Node* fallthrough_control = control;
+ for (PropertyAccessInfo const& access_info : access_infos) {
+ Node* this_value = value;
+ Node* this_receiver = receiver;
+ Node* this_effect = effect;
+ Node* this_control;
+
+ // Perform map check on {receiver}.
+ Type* receiver_type = access_info.receiver_type();
+ if (receiver_type->Is(Type::String())) {
+ // Emit an instance type check for strings.
+ Node* receiver_instance_type = this_effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ receiver_map, this_effect, fallthrough_control);
+ Node* check =
+ graph()->NewNode(machine()->Uint32LessThan(), receiver_instance_type,
+ jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ } else {
+ // Emit a (sequence of) map checks for other {receiver}s.
+ ZoneVector<Node*> this_controls(zone());
+ ZoneVector<Node*> this_effects(zone());
+ for (auto i = access_info.receiver_type()->Classes(); !i.Done();
+ i.Advance()) {
+ Handle<Map> map = i.Current();
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
+ receiver_map, jsgraph()->Constant(map));
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_effects.push_back(this_effect);
+ }
+
+ // The Number case requires special treatment to also deal with Smis.
+ if (receiver_type->Is(Type::Number())) {
+ // Join this check with the "receiver is smi" check above, and mark the
+ // "receiver is smi" check as "consumed" so that we don't deoptimize if
+ // the {receiver} is actually a Smi.
+ if (receiverissmi_control != nullptr) {
+ this_controls.push_back(receiverissmi_control);
+ this_effects.push_back(receiverissmi_effect);
+ receiverissmi_control = receiverissmi_effect = nullptr;
+ }
+ }
+
+ // Create dominating Merge+EffectPhi for this {receiver} type.
+ int const this_control_count = static_cast<int>(this_controls.size());
+ this_control =
+ (this_control_count == 1)
+ ? this_controls.front()
+ : graph()->NewNode(common()->Merge(this_control_count),
+ this_control_count, &this_controls.front());
+ this_effects.push_back(this_control);
+ int const this_effect_count = static_cast<int>(this_effects.size());
+ this_effect =
+ (this_control_count == 1)
+ ? this_effects.front()
+ : graph()->NewNode(common()->EffectPhi(this_control_count),
+ this_effect_count, &this_effects.front());
+ }
+
+ // Determine actual holder and perform prototype chain checks.
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ AssumePrototypesStable(receiver_type, native_context, holder);
+ }
+
+ // Generate the actual property access.
+ if (access_info.IsNotFound()) {
+ DCHECK_EQ(AccessMode::kLoad, access_mode);
+ if (is_strong(language_mode)) {
+ // TODO(bmeurer/mstarzinger): Add support for lowering inside try
+ // blocks rewiring the IfException edge to a runtime call/throw.
+ exit_controls.push_back(this_control);
+ continue;
+ } else {
+ this_value = jsgraph()->UndefinedConstant();
+ }
+ } else if (access_info.IsDataConstant()) {
+ this_value = jsgraph()->Constant(access_info.constant());
+ if (access_mode == AccessMode::kStore) {
+ Node* check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Tagged()), value, this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ }
+ } else {
+ DCHECK(access_info.IsDataField());
+ FieldIndex const field_index = access_info.field_index();
+ FieldCheck const field_check = access_info.field_check();
+ Type* const field_type = access_info.field_type();
+ switch (field_check) {
+ case FieldCheck::kNone:
+ break;
+ case FieldCheck::kJSArrayBufferViewBufferNotNeutered: {
+ Node* this_buffer = this_effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewBuffer()),
+ this_receiver, this_effect, this_control);
+ Node* this_buffer_bit_field = this_effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferBitField()),
+ this_buffer, this_effect, this_control);
+ Node* check = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(), this_buffer_bit_field,
+ jsgraph()->Int32Constant(
+ 1 << JSArrayBuffer::WasNeutered::kShift)),
+ jsgraph()->Int32Constant(0));
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_control = graph()->NewNode(common()->IfFalse(), branch);
+ break;
+ }
+ }
+ if (access_mode == AccessMode::kLoad &&
+ access_info.holder().ToHandle(&holder)) {
+ this_receiver = jsgraph()->Constant(holder);
+ }
+ Node* this_storage = this_receiver;
+ if (!field_index.is_inobject()) {
+ this_storage = this_effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
+ this_storage, this_effect, this_control);
+ }
+ FieldAccess field_access = {kTaggedBase, field_index.offset(), name,
+ field_type, MachineType::AnyTagged()};
+ if (access_mode == AccessMode::kLoad) {
+ if (field_type->Is(Type::UntaggedFloat64())) {
+ if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+ !FLAG_unbox_double_fields) {
+ this_storage = this_effect =
+ graph()->NewNode(simplified()->LoadField(field_access),
+ this_storage, this_effect, this_control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ }
+ field_access.machine_type = MachineType::Float64();
+ }
+ this_value = this_effect =
+ graph()->NewNode(simplified()->LoadField(field_access),
+ this_storage, this_effect, this_control);
+ } else {
+ DCHECK_EQ(AccessMode::kStore, access_mode);
+ if (field_type->Is(Type::UntaggedFloat64())) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(
+ graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_value = graph()->NewNode(common()->Guard(Type::Number()),
+ this_value, this_control);
+
+ if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+ !FLAG_unbox_double_fields) {
+ if (access_info.HasTransitionMap()) {
+ // Allocate a MutableHeapNumber for the new property.
+ Callable callable =
+ CodeFactory::AllocateMutableHeapNumber(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow);
+ Node* this_box = this_effect = graph()->NewNode(
+ common()->Call(desc),
+ jsgraph()->HeapConstant(callable.code()),
+ jsgraph()->NoContextConstant(), this_effect, this_control);
+ this_effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
+ this_box, this_value, this_effect, this_control);
+ this_value = this_box;
+
+ field_access.type = Type::TaggedPointer();
+ } else {
+ // We just store directly to the MutableHeapNumber.
+ this_storage = this_effect =
+ graph()->NewNode(simplified()->LoadField(field_access),
+ this_storage, this_effect, this_control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ field_access.machine_type = MachineType::Float64();
+ }
+ } else {
+ // Unboxed double field, we store directly to the field.
+ field_access.machine_type = MachineType::Float64();
+ }
+ } else if (field_type->Is(Type::TaggedSigned())) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(
+ graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
+ this_value, this_control);
+ } else if (field_type->Is(Type::TaggedPointer())) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_control = graph()->NewNode(common()->IfFalse(), branch);
+ if (field_type->NumClasses() > 0) {
+ // Emit a (sequence of) map checks for the value.
+ ZoneVector<Node*> this_controls(zone());
+ Node* this_value_map = this_effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMap()), this_value,
+ this_effect, this_control);
+ for (auto i = field_type->Classes(); !i.Done(); i.Advance()) {
+ Handle<Map> field_map(i.Current());
+ check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Internal()),
+ this_value_map, jsgraph()->Constant(field_map));
+ branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ this_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_controls.push_back(
+ graph()->NewNode(common()->IfTrue(), branch));
+ }
+ exit_controls.push_back(this_control);
+ int const this_control_count =
+ static_cast<int>(this_controls.size());
+ this_control =
+ (this_control_count == 1)
+ ? this_controls.front()
+ : graph()->NewNode(common()->Merge(this_control_count),
+ this_control_count,
+ &this_controls.front());
+ }
+ } else {
+ DCHECK(field_type->Is(Type::Tagged()));
+ }
+ Handle<Map> transition_map;
+ if (access_info.transition_map().ToHandle(&transition_map)) {
+ this_effect = graph()->NewNode(common()->BeginRegion(), this_effect);
+ this_effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), this_receiver,
+ jsgraph()->Constant(transition_map), this_effect, this_control);
+ }
+ this_effect = graph()->NewNode(simplified()->StoreField(field_access),
+ this_storage, this_value, this_effect,
+ this_control);
+ if (access_info.HasTransitionMap()) {
+ this_effect =
+ graph()->NewNode(common()->FinishRegion(),
+ jsgraph()->UndefinedConstant(), this_effect);
+ }
+ }
+ }
+
+ // Remember the final state for this property access.
+ values.push_back(this_value);
+ effects.push_back(this_effect);
+ controls.push_back(this_control);
+ }
+
+ // Collect the fallthrough control as final "exit" control.
+ if (fallthrough_control != control) {
+ // Mark the last fallthrough branch as deferred.
+ MarkAsDeferred(fallthrough_control);
+ }
+ exit_controls.push_back(fallthrough_control);
+
+ // Also collect the "receiver is smi" control if we didn't handle the case of
+ // Number primitives in the polymorphic branches above.
+ if (receiverissmi_control != nullptr) {
+ // Mark the "receiver is smi" case as deferred.
+ MarkAsDeferred(receiverissmi_control);
+ DCHECK_EQ(exit_effect, receiverissmi_effect);
+ exit_controls.push_back(receiverissmi_control);
+ }
+
+  // Generate the single "exit" point, which we reach if either all
+  // map/instance type checks failed, or one of the assumptions inside one of
+  // the cases fails (e.g. a failing prototype chain check).
+ // TODO(bmeurer): Consider falling back to IC here if deoptimization is
+ // disabled.
+ int const exit_control_count = static_cast<int>(exit_controls.size());
+ Node* exit_control =
+ (exit_control_count == 1)
+ ? exit_controls.front()
+ : graph()->NewNode(common()->Merge(exit_control_count),
+ exit_control_count, &exit_controls.front());
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, exit_effect, exit_control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+
+ // Generate the final merge point for all (polymorphic) branches.
+ int const control_count = static_cast<int>(controls.size());
+ if (control_count == 0) {
+ value = effect = control = jsgraph()->Dead();
+ } else if (control_count == 1) {
+ value = values.front();
+ effect = effects.front();
+ control = controls.front();
+ } else {
+ control = graph()->NewNode(common()->Merge(control_count), control_count,
+ &controls.front());
+ values.push_back(control);
+ value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, control_count),
+ control_count + 1, &values.front());
+ effects.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(control_count),
+ control_count + 1, &effects.front());
+ }
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
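+
+// For illustration (not literal source): given a polymorphic site such as
+//
+//   function get(o) { return o.x; }
+//   get({x: 1});
+//   get(new Point(2, 3));  // Point is a hypothetical class
+//
+// the lowering above emits one map check plus one specialized field access
+// per receiver map, joins the successful paths with Merge/Phi/EffectPhi, and
+// routes every failed check to the single eager Deoptimize exit.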
+
+
+Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
+ NamedAccess const& p = NamedAccessOf(node->op());
+ Node* const value = jsgraph()->Dead();
+
+ // Extract receiver maps from the LOAD_IC using the LoadICNexus.
+ MapHandleList receiver_maps;
+ if (!p.feedback().IsValid()) return NoChange();
+ LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
+ DCHECK_LT(0, receiver_maps.length());
+
+ // Try to lower the named access based on the {receiver_maps}.
+ return ReduceNamedAccess(node, value, receiver_maps, p.name(),
+ AccessMode::kLoad, p.language_mode());
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode());
+ NamedAccess const& p = NamedAccessOf(node->op());
+ Node* const value = NodeProperties::GetValueInput(node, 1);
+
+ // Extract receiver maps from the STORE_IC using the StoreICNexus.
+ MapHandleList receiver_maps;
+ if (!p.feedback().IsValid()) return NoChange();
+ StoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
+ DCHECK_LT(0, receiver_maps.length());
+
+ // Try to lower the named access based on the {receiver_maps}.
+ return ReduceNamedAccess(node, value, receiver_maps, p.name(),
+ AccessMode::kStore, p.language_mode());
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceElementAccess(
+ Node* node, Node* index, Node* value, MapHandleList const& receiver_maps,
+ AccessMode access_mode, LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
+ node->opcode() == IrOpcode::kJSStoreProperty);
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // TODO(bmeurer): Add support for non-standard stores.
+ if (store_mode != STANDARD_STORE) return NoChange();
+
+ // Retrieve the native context from the given {node}.
+ Handle<Context> native_context;
+ if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
+
+ // Compute element access infos for the receiver maps.
+ AccessInfoFactory access_info_factory(dependencies(), native_context,
+ graph()->zone());
+ ZoneVector<ElementAccessInfo> access_infos(zone());
+ if (!access_info_factory.ComputeElementAccessInfos(receiver_maps, access_mode,
+ &access_infos)) {
+ return NoChange();
+ }
+
+ // Nothing to do if we have no non-deprecated maps.
+ if (access_infos.empty()) return NoChange();
+
+ // The final states for every polymorphic branch. We join them with
+ // Merge+Phi+EffectPhi at the bottom.
+ ZoneVector<Node*> values(zone());
+ ZoneVector<Node*> effects(zone());
+ ZoneVector<Node*> controls(zone());
+
+ // The list of "exiting" controls, which currently go to a single deoptimize.
+ // TODO(bmeurer): Consider using an IC as fallback.
+ Node* const exit_effect = effect;
+ ZoneVector<Node*> exit_controls(zone());
+
+ // Ensure that {receiver} is a heap object.
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ control = graph()->NewNode(common()->IfFalse(), branch);
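+  // (If {receiver} turns out to be a Smi at runtime, the IfTrue projection
+  // above leads to the common deoptimization exit collected in
+  // {exit_controls}.)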
+
+ // Load the {receiver} map. The resulting effect is the dominating effect for
+ // all (polymorphic) branches.
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+
+ // Generate code for the various different element access patterns.
+ Node* fallthrough_control = control;
+ for (ElementAccessInfo const& access_info : access_infos) {
+ Node* this_receiver = receiver;
+ Node* this_value = value;
+ Node* this_index = index;
+ Node* this_effect;
+ Node* this_control;
+
+ // Perform map check on {receiver}.
+ Type* receiver_type = access_info.receiver_type();
+ bool receiver_is_jsarray = true;
+ {
+ ZoneVector<Node*> this_controls(zone());
+ ZoneVector<Node*> this_effects(zone());
+ for (auto i = access_info.receiver_type()->Classes(); !i.Done();
+ i.Advance()) {
+ Handle<Map> map = i.Current();
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
+ receiver_map, jsgraph()->Constant(map));
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_effects.push_back(effect);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ if (!map->IsJSArrayMap()) receiver_is_jsarray = false;
+ }
+
+ // Generate possible elements kind transitions.
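+      // (For example, a recorded FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+      // transition only requires the in-place map store below, whereas
+      // FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS has to rewrite the backing
+      // store and thus calls out to the TransitionElementsKindStub.)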
+ for (auto transition : access_info.transitions()) {
+ Handle<Map> transition_source = transition.first;
+ Handle<Map> transition_target = transition.second;
+
+ // Check if {receiver} has the specified {transition_source} map.
+ Node* check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Any()), receiver_map,
+ jsgraph()->HeapConstant(transition_source));
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+
+ // Migrate {receiver} from {transition_source} to {transition_target}.
+ Node* transition_control = graph()->NewNode(common()->IfTrue(), branch);
+ Node* transition_effect = effect;
+ if (IsSimpleMapChangeTransition(transition_source->elements_kind(),
+ transition_target->elements_kind())) {
+ // In-place migration, just store the {transition_target} map.
+ transition_effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), receiver,
+ jsgraph()->HeapConstant(transition_target), transition_effect,
+ transition_control);
+ } else {
+ // Instance migration, let the stub deal with the {receiver}.
+ TransitionElementsKindStub stub(isolate(),
+ transition_source->elements_kind(),
+ transition_target->elements_kind(),
+ transition_source->IsJSArrayMap());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ transition_effect = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(stub.GetCode()),
+ receiver, jsgraph()->HeapConstant(transition_target), context,
+ frame_state, transition_effect, transition_control);
+ }
+ this_controls.push_back(transition_control);
+ this_effects.push_back(transition_effect);
+
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ }
+
+ // Create single chokepoint for the control.
+ int const this_control_count = static_cast<int>(this_controls.size());
+ if (this_control_count == 1) {
+ this_control = this_controls.front();
+ this_effect = this_effects.front();
+ } else {
+ this_control =
+ graph()->NewNode(common()->Merge(this_control_count),
+ this_control_count, &this_controls.front());
+ this_effects.push_back(this_control);
+ this_effect =
+ graph()->NewNode(common()->EffectPhi(this_control_count),
+ this_control_count + 1, &this_effects.front());
+ }
+ }
+
+ // Certain stores need a prototype chain check because shape changes
+ // could allow callbacks on elements in the prototype chain that are
+ // not compatible with (monomorphic) keyed stores.
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ AssumePrototypesStable(receiver_type, native_context, holder);
+ }
+
+ // Check that the {index} is actually a Number.
+ if (!NumberMatcher(this_index).HasValue()) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsNumber(), this_index);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_index = graph()->NewNode(common()->Guard(Type::Number()), this_index,
+ this_control);
+ }
+
+ // Convert the {index} to an unsigned32 value and check if the result is
+ // equal to the original {index}.
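+    // (E.g. an {index} of 4.5 or -1 differs from its NumberToUint32
+    // conversion and hits the deoptimization exit, while 4.0 passes the
+    // check and is replaced by the converted uint32 value.)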
+ if (!NumberMatcher(this_index).IsInRange(0.0, kMaxUInt32)) {
+ Node* this_index32 =
+ graph()->NewNode(simplified()->NumberToUint32(), this_index);
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), this_index32,
+ this_index);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_index = this_index32;
+ }
+
+ // TODO(bmeurer): We currently specialize based on elements kind. We should
+ // also be able to properly support strings and other JSObjects here.
+ ElementsKind elements_kind = access_info.elements_kind();
+
+ // Load the elements for the {receiver}.
+ Node* this_elements = this_effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ this_receiver, this_effect, this_control);
+
+ // Don't try to store to a copy-on-write backing store.
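+    // (Such stores would first have to copy the backing store. For example,
+    // the elements of an array literal like "var a = [1, 2, 3]" may still be
+    // copy-on-write, in which case the elements map is the
+    // fixed_cow_array_map rather than the fixed_array_map checked below.)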
+ if (access_mode == AccessMode::kStore &&
+ IsFastSmiOrObjectElementsKind(elements_kind)) {
+ Node* this_elements_map = this_effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ this_elements, this_effect, this_control);
+ check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Any()), this_elements_map,
+ jsgraph()->HeapConstant(factory()->fixed_array_map()));
+ branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
+ this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ }
+
+ // Load the length of the {receiver}.
+ Node* this_length = this_effect =
+ receiver_is_jsarray
+ ? graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSArrayLength(elements_kind)),
+ this_receiver, this_effect, this_control)
+ : graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ this_elements, this_effect, this_control);
+
+ // Check that the {index} is in the valid range for the {receiver}.
+ Node* check = graph()->NewNode(simplified()->NumberLessThan(), this_index,
+ this_length);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
+ this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Compute the element access.
+ Type* element_type = Type::Any();
+ MachineType element_machine_type = MachineType::AnyTagged();
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ element_type = type_cache_.kFloat64;
+ element_machine_type = MachineType::Float64();
+ } else if (IsFastSmiElementsKind(elements_kind)) {
+ element_type = type_cache_.kSmi;
+ }
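+    // (The {element_access} built below addresses the backing store roughly
+    // at elements + FixedArray::kHeaderSize + index scaled by the element
+    // size implied by {element_machine_type}.)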
+ ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
+ element_type, element_machine_type};
+
+ // Access the actual element.
+ // TODO(bmeurer): Refactor this into separate methods or even a separate
+ // class that deals with the elements access.
+ if (access_mode == AccessMode::kLoad) {
+ // Compute the real element access type, which includes the hole in case
+ // of holey backing stores.
+ if (elements_kind == FAST_HOLEY_ELEMENTS ||
+ elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ element_access.type = Type::Union(
+ element_type,
+ Type::Constant(factory()->the_hole_value(), graph()->zone()),
+ graph()->zone());
+ }
+ // Perform the actual backing store access.
+ this_value = this_effect = graph()->NewNode(
+ simplified()->LoadElement(element_access), this_elements, this_index,
+ this_effect, this_control);
+ // Handle loading from holey backing stores correctly, by either mapping
+ // the hole to undefined if possible, or deoptimizing otherwise.
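+      // (E.g. for "var a = [1, , 3]; a[1]" the load yields the hole; if the
+      // receiver still has the initial holey array map and the Array
+      // prototype chain is intact, no prototype can supply the element, so
+      // the hole can safely be mapped to undefined.)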
+ if (elements_kind == FAST_HOLEY_ELEMENTS ||
+ elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ // Perform the hole check on the result.
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(element_access.type),
+ this_value, jsgraph()->TheHoleConstant());
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ // Check if we are allowed to turn the hole into undefined.
+ Type* initial_holey_array_type = Type::Class(
+ handle(isolate()->get_initial_js_array_map(elements_kind)),
+ graph()->zone());
+ if (receiver_type->NowIs(initial_holey_array_type) &&
+ isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+ // Add a code dependency on the array protector cell.
+ AssumePrototypesStable(receiver_type, native_context,
+ isolate()->initial_object_prototype());
+ dependencies()->AssumePropertyCell(factory()->array_protector());
+ // Turn the hole into undefined.
+ this_control =
+ graph()->NewNode(common()->Merge(2), if_true, if_false);
+ this_value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->UndefinedConstant(), this_value, this_control);
+ element_type =
+ Type::Union(element_type, Type::Undefined(), graph()->zone());
+ } else {
+ // Deoptimize in case of the hole.
+ exit_controls.push_back(if_true);
+ this_control = if_false;
+ }
+ // Rename the result to represent the actual type (not polluted by the
+ // hole).
+ this_value = graph()->NewNode(common()->Guard(element_type), this_value,
+ this_control);
+ } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+ // Perform the hole check on the result.
+ Node* check =
+ graph()->NewNode(simplified()->NumberIsHoleNaN(), this_value);
+ // Check if we are allowed to return the hole directly.
+ Type* initial_holey_array_type = Type::Class(
+ handle(isolate()->get_initial_js_array_map(elements_kind)),
+ graph()->zone());
+ if (receiver_type->NowIs(initial_holey_array_type) &&
+ isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+ // Add a code dependency on the array protector cell.
+ AssumePrototypesStable(receiver_type, native_context,
+ isolate()->initial_object_prototype());
+ dependencies()->AssumePropertyCell(factory()->array_protector());
+ // Turn the hole into undefined.
+ this_value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged,
+ BranchHint::kFalse),
+ check, jsgraph()->UndefinedConstant(), this_value);
+ } else {
+ // Deoptimize in case of the hole.
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ this_control = graph()->NewNode(common()->IfFalse(), branch);
+ exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ }
+ }
+ } else {
+ DCHECK_EQ(AccessMode::kStore, access_mode);
+ if (IsFastSmiElementsKind(elements_kind)) {
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
+ this_value, this_control);
+ } else if (IsFastDoubleElementsKind(elements_kind)) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_value = graph()->NewNode(common()->Guard(Type::Number()),
+ this_value, this_control);
+ }
+ this_effect = graph()->NewNode(simplified()->StoreElement(element_access),
+ this_elements, this_index, this_value,
+ this_effect, this_control);
+ }
+
+ // Remember the final state for this element access.
+ values.push_back(this_value);
+ effects.push_back(this_effect);
+ controls.push_back(this_control);
+ }
+
+ // Collect the fallthrough control as final "exit" control.
+ if (fallthrough_control != control) {
+ // Mark the last fallthrough branch as deferred.
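+    // (This rewrites the hint on the dominating Branch so that the final
+    // map-check failure path is laid out as deferred code.)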
+ MarkAsDeferred(fallthrough_control);
+ }
+ exit_controls.push_back(fallthrough_control);
+
+  // Generate the single "exit" point, which we reach if either all
+  // map/instance type checks failed, or one of the assumptions inside one of
+  // the cases fails (i.e. a failing prototype chain check).
+ // TODO(bmeurer): Consider falling back to IC here if deoptimization is
+ // disabled.
+ int const exit_control_count = static_cast<int>(exit_controls.size());
+ Node* exit_control =
+ (exit_control_count == 1)
+ ? exit_controls.front()
+ : graph()->NewNode(common()->Merge(exit_control_count),
+ exit_control_count, &exit_controls.front());
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, exit_effect, exit_control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+
+ // Generate the final merge point for all (polymorphic) branches.
+ int const control_count = static_cast<int>(controls.size());
+ if (control_count == 0) {
+ value = effect = control = jsgraph()->Dead();
+ } else if (control_count == 1) {
+ value = values.front();
+ effect = effects.front();
+ control = controls.front();
+ } else {
+ control = graph()->NewNode(common()->Merge(control_count), control_count,
+ &controls.front());
+ values.push_back(control);
+ value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, control_count),
+ control_count + 1, &values.front());
+ effects.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(control_count),
+ control_count + 1, &effects.front());
+ }
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
+ Node* node, Node* index, Node* value, FeedbackNexus const& nexus,
+ AccessMode access_mode, LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
+ node->opcode() == IrOpcode::kJSStoreProperty);
+
+ // Extract receiver maps from the {nexus}.
+ MapHandleList receiver_maps;
+ if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
+ DCHECK_LT(0, receiver_maps.length());
+
+ // Optimize access for constant {index}.
+ HeapObjectMatcher mindex(index);
+ if (mindex.HasValue() && mindex.Value()->IsPrimitive()) {
+ // Keyed access requires a ToPropertyKey on the {index} first before
+ // looking up the property on the object (see ES6 section 12.3.2.1).
+ // We can only do this for non-observable ToPropertyKey invocations,
+ // so we limit the constant indices to primitives at this point.
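+    // (E.g. a constant key "1" canonicalizes to the array index 1, while
+    // "foo" becomes a named access on the internalized name; object keys are
+    // excluded since their ToPropertyKey conversion could run arbitrary
+    // toString/valueOf code.)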
+ Handle<Name> name;
+ if (Object::ToName(isolate(), mindex.Value()).ToHandle(&name)) {
+ uint32_t array_index;
+ if (name->AsArrayIndex(&array_index)) {
+ // Use the constant array index.
+ index = jsgraph()->Constant(static_cast<double>(array_index));
+ } else {
+ name = factory()->InternalizeName(name);
+ return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
+ language_mode);
+ }
+ }
+ }
+
+ // Check if we have feedback for a named access.
+ if (Name* name = nexus.FindFirstName()) {
+ return ReduceNamedAccess(node, value, receiver_maps,
+ handle(name, isolate()), access_mode,
+ language_mode, index);
+ }
+
+ // Try to lower the element access based on the {receiver_maps}.
+ return ReduceElementAccess(node, index, value, receiver_maps, access_mode,
+ language_mode, store_mode);
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
+ PropertyAccess const& p = PropertyAccessOf(node->op());
+ Node* const index = NodeProperties::GetValueInput(node, 1);
+ Node* const value = jsgraph()->Dead();
+
+ // Extract receiver maps from the KEYED_LOAD_IC using the KeyedLoadICNexus.
+ if (!p.feedback().IsValid()) return NoChange();
+ KeyedLoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+ // Try to lower the keyed access based on the {nexus}.
+ return ReduceKeyedAccess(node, index, value, nexus, AccessMode::kLoad,
+ p.language_mode(), STANDARD_STORE);
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode());
+ PropertyAccess const& p = PropertyAccessOf(node->op());
+ Node* const index = NodeProperties::GetValueInput(node, 1);
+ Node* const value = NodeProperties::GetValueInput(node, 2);
+
+ // Extract receiver maps from the KEYED_STORE_IC using the KeyedStoreICNexus.
+ if (!p.feedback().IsValid()) return NoChange();
+ KeyedStoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+ // Extract the keyed access store mode from the KEYED_STORE_IC.
+ KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
+
+ // Try to lower the keyed access based on the {nexus}.
+ return ReduceKeyedAccess(node, index, value, nexus, AccessMode::kStore,
+ p.language_mode(), store_mode);
+}
+
+
+void JSNativeContextSpecialization::AssumePrototypesStable(
+ Type* receiver_type, Handle<Context> native_context,
+ Handle<JSObject> holder) {
+ // Determine actual holder and perform prototype chain checks.
+ for (auto i = receiver_type->Classes(); !i.Done(); i.Advance()) {
+ Handle<Map> map = i.Current();
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ Handle<JSFunction> constructor;
+ if (Map::GetConstructorFunction(map, native_context)
+ .ToHandle(&constructor)) {
+ map = handle(constructor->initial_map(), isolate());
+ }
+ dependencies()->AssumePrototypeMapsStable(map, holder);
+ }
+}
+
+
+void JSNativeContextSpecialization::MarkAsDeferred(Node* if_projection) {
+ Node* branch = NodeProperties::GetControlInput(if_projection);
+ DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
+ if (if_projection->opcode() == IrOpcode::kIfTrue) {
+ NodeProperties::ChangeOp(branch, common()->Branch(BranchHint::kFalse));
+ } else {
+ DCHECK_EQ(IrOpcode::kIfFalse, if_projection->opcode());
+ NodeProperties::ChangeOp(branch, common()->Branch(BranchHint::kTrue));
+ }
+}
+
+
+MaybeHandle<Context> JSNativeContextSpecialization::GetNativeContext(
+ Node* node) {
+ Node* const context = NodeProperties::GetContextInput(node);
+ return NodeProperties::GetSpecializationNativeContext(context,
+ native_context());
+}
+
+
+Graph* JSNativeContextSpecialization::graph() const {
+ return jsgraph()->graph();
+}
+
+
+Isolate* JSNativeContextSpecialization::isolate() const {
+ return jsgraph()->isolate();
+}
+
+
+Factory* JSNativeContextSpecialization::factory() const {
+ return isolate()->factory();
+}
+
+
+MachineOperatorBuilder* JSNativeContextSpecialization::machine() const {
+ return jsgraph()->machine();
+}
+
+
+CommonOperatorBuilder* JSNativeContextSpecialization::common() const {
+ return jsgraph()->common();
+}
+
+
+JSOperatorBuilder* JSNativeContextSpecialization::javascript() const {
+ return jsgraph()->javascript();
+}
+
+
+SimplifiedOperatorBuilder* JSNativeContextSpecialization::simplified() const {
+ return jsgraph()->simplified();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
new file mode 100644
index 0000000..45ff87f
--- /dev/null
+++ b/src/compiler/js-native-context-specialization.h
@@ -0,0 +1,116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_NATIVE_CONTEXT_SPECIALIZATION_H_
+#define V8_COMPILER_JS_NATIVE_CONTEXT_SPECIALIZATION_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class Factory;
+class FeedbackNexus;
+class TypeCache;
+
+
+namespace compiler {
+
+// Forward declarations.
+enum class AccessMode;
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
+
+
+// Specializes a given JSGraph to a given native context, potentially constant
+// folding some {LoadGlobal} nodes or strength-reducing some {StoreGlobal}
+// nodes. It also specializes {LoadNamed} and {StoreNamed} nodes according
+// to type feedback (if available).
+class JSNativeContextSpecialization final : public AdvancedReducer {
+ public:
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
+ MaybeHandle<Context> native_context,
+ CompilationDependencies* dependencies,
+ Zone* zone);
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceJSLoadNamed(Node* node);
+ Reduction ReduceJSStoreNamed(Node* node);
+ Reduction ReduceJSLoadProperty(Node* node);
+ Reduction ReduceJSStoreProperty(Node* node);
+
+ Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
+ MapHandleList const& receiver_maps,
+ AccessMode access_mode,
+ LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode);
+ Reduction ReduceKeyedAccess(Node* node, Node* index, Node* value,
+ FeedbackNexus const& nexus,
+ AccessMode access_mode,
+ LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode);
+ Reduction ReduceNamedAccess(Node* node, Node* value,
+ MapHandleList const& receiver_maps,
+ Handle<Name> name, AccessMode access_mode,
+ LanguageMode language_mode,
+ Node* index = nullptr);
+
+ // Adds stability dependencies on all prototypes of every class in
+ // {receiver_type} up to (and including) the {holder}.
+ void AssumePrototypesStable(Type* receiver_type,
+ Handle<Context> native_context,
+ Handle<JSObject> holder);
+
+ // Assuming that {if_projection} is either IfTrue or IfFalse, adds a hint on
+ // the dominating Branch that {if_projection} is the unlikely (deferred) case.
+ void MarkAsDeferred(Node* if_projection);
+
+ // Retrieve the native context from the given {node} if known.
+ MaybeHandle<Context> GetNativeContext(Node* node);
+
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
+ Factory* factory() const;
+ CommonOperatorBuilder* common() const;
+ JSOperatorBuilder* javascript() const;
+ SimplifiedOperatorBuilder* simplified() const;
+ MachineOperatorBuilder* machine() const;
+ Flags flags() const { return flags_; }
+ MaybeHandle<Context> native_context() const { return native_context_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Zone* zone() const { return zone_; }
+
+ JSGraph* const jsgraph_;
+ Flags const flags_;
+ MaybeHandle<Context> native_context_;
+ CompilationDependencies* const dependencies_;
+ Zone* const zone_;
+ TypeCache const& type_cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSNativeContextSpecialization);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(JSNativeContextSpecialization::Flags)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_NATIVE_CONTEXT_SPECIALIZATION_H_
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index aa76a3b..1455f0a 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -9,30 +9,138 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/type-feedback-vector-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
-bool operator==(CallFunctionParameters const& lhs,
- CallFunctionParameters const& rhs) {
- return lhs.arity() == rhs.arity() && lhs.flags() == rhs.flags();
+VectorSlotPair::VectorSlotPair() {}
+
+
+int VectorSlotPair::index() const {
+ return vector_.is_null() ? -1 : vector_->GetIndex(slot_);
}
-bool operator!=(CallFunctionParameters const& lhs,
- CallFunctionParameters const& rhs) {
+bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
+ return lhs.slot() == rhs.slot() &&
+ lhs.vector().location() == rhs.vector().location();
+}
+
+
+bool operator!=(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(CallFunctionParameters const& p) {
- return base::hash_combine(p.arity(), p.flags());
+size_t hash_value(VectorSlotPair const& p) {
+ return base::hash_combine(p.slot(), p.vector().location());
+}
+
+
+ConvertReceiverMode ConvertReceiverModeOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSConvertReceiver, op->opcode());
+ return OpParameter<ConvertReceiverMode>(op);
+}
+
+
+ToBooleanHints ToBooleanHintsOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSToBoolean, op->opcode());
+ return OpParameter<ToBooleanHints>(op);
+}
+
+
+size_t hash_value(TailCallMode mode) {
+ return base::hash_value(static_cast<unsigned>(mode));
+}
+
+
+std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
+ switch (mode) {
+ case TailCallMode::kAllow:
+ return os << "ALLOW_TAIL_CALLS";
+ case TailCallMode::kDisallow:
+ return os << "DISALLOW_TAIL_CALLS";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+bool operator==(BinaryOperationParameters const& lhs,
+ BinaryOperationParameters const& rhs) {
+ return lhs.language_mode() == rhs.language_mode() &&
+ lhs.hints() == rhs.hints();
+}
+
+
+bool operator!=(BinaryOperationParameters const& lhs,
+ BinaryOperationParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(BinaryOperationParameters const& p) {
+ return base::hash_combine(p.language_mode(), p.hints());
+}
+
+
+std::ostream& operator<<(std::ostream& os, BinaryOperationParameters const& p) {
+ return os << p.language_mode() << ", " << p.hints();
+}
+
+
+BinaryOperationParameters const& BinaryOperationParametersOf(
+ Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
+ op->opcode() == IrOpcode::kJSBitwiseXor ||
+ op->opcode() == IrOpcode::kJSBitwiseAnd ||
+ op->opcode() == IrOpcode::kJSShiftLeft ||
+ op->opcode() == IrOpcode::kJSShiftRight ||
+ op->opcode() == IrOpcode::kJSShiftRightLogical ||
+ op->opcode() == IrOpcode::kJSAdd ||
+ op->opcode() == IrOpcode::kJSSubtract ||
+ op->opcode() == IrOpcode::kJSMultiply ||
+ op->opcode() == IrOpcode::kJSDivide ||
+ op->opcode() == IrOpcode::kJSModulus);
+ return OpParameter<BinaryOperationParameters>(op);
+}
+
+
+bool operator==(CallConstructParameters const& lhs,
+ CallConstructParameters const& rhs) {
+ return lhs.arity() == rhs.arity() && lhs.feedback() == rhs.feedback();
+}
+
+
+bool operator!=(CallConstructParameters const& lhs,
+ CallConstructParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(CallConstructParameters const& p) {
+ return base::hash_combine(p.arity(), p.feedback());
+}
+
+
+std::ostream& operator<<(std::ostream& os, CallConstructParameters const& p) {
+ return os << p.arity();
+}
+
+
+CallConstructParameters const& CallConstructParametersOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, op->opcode());
+ return OpParameter<CallConstructParameters>(op);
}
std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
- return os << p.arity() << ", " << p.flags();
+ os << p.arity() << ", " << p.language_mode() << ", " << p.convert_mode()
+ << ", " << p.tail_call_mode();
+ return os;
}
@@ -108,167 +216,342 @@
}
-bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
- return lhs.slot().ToInt() == rhs.slot().ToInt() &&
- lhs.vector().is_identical_to(rhs.vector());
+DynamicAccess::DynamicAccess(const Handle<String>& name, TypeofMode typeof_mode)
+ : name_(name), typeof_mode_(typeof_mode) {}
+
+
+bool operator==(DynamicAccess const& lhs, DynamicAccess const& rhs) {
+ UNIMPLEMENTED();
+ return true;
}
-size_t hash_value(VectorSlotPair const& p) {
- // TODO(mvstanton): include the vector in the hash.
- base::hash<int> h;
- return h(p.slot().ToInt());
+bool operator!=(DynamicAccess const& lhs, DynamicAccess const& rhs) {
+ return !(lhs == rhs);
}
-bool operator==(LoadNamedParameters const& lhs,
- LoadNamedParameters const& rhs) {
- return lhs.name() == rhs.name() &&
- lhs.contextual_mode() == rhs.contextual_mode() &&
+size_t hash_value(DynamicAccess const& access) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+
+std::ostream& operator<<(std::ostream& os, DynamicAccess const& access) {
+ return os << Brief(*access.name()) << ", " << access.typeof_mode();
+}
+
+
+DynamicAccess const& DynamicAccessOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSLoadDynamic, op->opcode());
+ return OpParameter<DynamicAccess>(op);
+}
+
+
+bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
+ return lhs.name().location() == rhs.name().location() &&
+ lhs.language_mode() == rhs.language_mode() &&
lhs.feedback() == rhs.feedback();
}
-bool operator!=(LoadNamedParameters const& lhs,
- LoadNamedParameters const& rhs) {
+bool operator!=(NamedAccess const& lhs, NamedAccess const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(LoadNamedParameters const& p) {
- return base::hash_combine(p.name(), p.contextual_mode(), p.feedback());
+size_t hash_value(NamedAccess const& p) {
+ return base::hash_combine(p.name().location(), p.language_mode(),
+ p.feedback());
}
-std::ostream& operator<<(std::ostream& os, LoadNamedParameters const& p) {
- return os << Brief(*p.name().handle()) << ", " << p.contextual_mode();
+std::ostream& operator<<(std::ostream& os, NamedAccess const& p) {
+ return os << Brief(*p.name()) << ", " << p.language_mode();
}
-std::ostream& operator<<(std::ostream& os, LoadPropertyParameters const& p) {
- // Nothing special to print.
+NamedAccess const& NamedAccessOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSLoadNamed ||
+ op->opcode() == IrOpcode::kJSStoreNamed);
+ return OpParameter<NamedAccess>(op);
+}
+
+
+std::ostream& operator<<(std::ostream& os, PropertyAccess const& p) {
+ return os << p.language_mode();
+}
+
+
+bool operator==(PropertyAccess const& lhs, PropertyAccess const& rhs) {
+ return lhs.language_mode() == rhs.language_mode() &&
+ lhs.feedback() == rhs.feedback();
+}
+
+
+bool operator!=(PropertyAccess const& lhs, PropertyAccess const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+PropertyAccess const& PropertyAccessOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSLoadProperty ||
+ op->opcode() == IrOpcode::kJSStoreProperty);
+ return OpParameter<PropertyAccess>(op);
+}
+
+
+size_t hash_value(PropertyAccess const& p) {
+ return base::hash_combine(p.language_mode(), p.feedback());
+}
+
+
+bool operator==(LoadGlobalParameters const& lhs,
+ LoadGlobalParameters const& rhs) {
+ return lhs.name().location() == rhs.name().location() &&
+ lhs.feedback() == rhs.feedback() &&
+ lhs.typeof_mode() == rhs.typeof_mode();
+}
+
+
+bool operator!=(LoadGlobalParameters const& lhs,
+ LoadGlobalParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(LoadGlobalParameters const& p) {
+ return base::hash_combine(p.name().location(), p.typeof_mode());
+}
+
+
+std::ostream& operator<<(std::ostream& os, LoadGlobalParameters const& p) {
+ return os << Brief(*p.name()) << ", " << p.typeof_mode();
+}
+
+
+const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSLoadGlobal, op->opcode());
+ return OpParameter<LoadGlobalParameters>(op);
+}
+
+
+bool operator==(StoreGlobalParameters const& lhs,
+ StoreGlobalParameters const& rhs) {
+ return lhs.language_mode() == rhs.language_mode() &&
+ lhs.name().location() == rhs.name().location() &&
+ lhs.feedback() == rhs.feedback();
+}
+
+
+bool operator!=(StoreGlobalParameters const& lhs,
+ StoreGlobalParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(StoreGlobalParameters const& p) {
+ return base::hash_combine(p.language_mode(), p.name().location(),
+ p.feedback());
+}
+
+
+std::ostream& operator<<(std::ostream& os, StoreGlobalParameters const& p) {
+ return os << p.language_mode() << ", " << Brief(*p.name());
+}
+
+
+const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSStoreGlobal, op->opcode());
+ return OpParameter<StoreGlobalParameters>(op);
+}
+
+
+bool operator==(CreateArgumentsParameters const& lhs,
+ CreateArgumentsParameters const& rhs) {
+ return lhs.type() == rhs.type() && lhs.start_index() == rhs.start_index();
+}
+
+
+bool operator!=(CreateArgumentsParameters const& lhs,
+ CreateArgumentsParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(CreateArgumentsParameters const& p) {
+ return base::hash_combine(p.type(), p.start_index());
+}
+
+
+std::ostream& operator<<(std::ostream& os, CreateArgumentsParameters const& p) {
+ return os << p.type() << ", " << p.start_index();
+}
+
+
+const CreateArgumentsParameters& CreateArgumentsParametersOf(
+ const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateArguments, op->opcode());
+ return OpParameter<CreateArgumentsParameters>(op);
+}
+
+
+bool operator==(CreateArrayParameters const& lhs,
+ CreateArrayParameters const& rhs) {
+ return lhs.arity() == rhs.arity() &&
+ lhs.site().location() == rhs.site().location();
+}
+
+
+bool operator!=(CreateArrayParameters const& lhs,
+ CreateArrayParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(CreateArrayParameters const& p) {
+ return base::hash_combine(p.arity(), p.site().location());
+}
+
+
+std::ostream& operator<<(std::ostream& os, CreateArrayParameters const& p) {
+ os << p.arity();
+ if (!p.site().is_null()) os << ", " << Brief(*p.site());
return os;
}
-bool operator==(LoadPropertyParameters const& lhs,
- LoadPropertyParameters const& rhs) {
- return lhs.feedback() == rhs.feedback();
+const CreateArrayParameters& CreateArrayParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, op->opcode());
+ return OpParameter<CreateArrayParameters>(op);
}
-bool operator!=(LoadPropertyParameters const& lhs,
- LoadPropertyParameters const& rhs) {
+bool operator==(CreateClosureParameters const& lhs,
+ CreateClosureParameters const& rhs) {
+ return lhs.pretenure() == rhs.pretenure() &&
+ lhs.shared_info().location() == rhs.shared_info().location();
+}
+
+
+bool operator!=(CreateClosureParameters const& lhs,
+ CreateClosureParameters const& rhs) {
return !(lhs == rhs);
}
-const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSLoadProperty, op->opcode());
- return OpParameter<LoadPropertyParameters>(op);
+size_t hash_value(CreateClosureParameters const& p) {
+ return base::hash_combine(p.pretenure(), p.shared_info().location());
}
-size_t hash_value(LoadPropertyParameters const& p) {
- return hash_value(p.feedback());
+std::ostream& operator<<(std::ostream& os, CreateClosureParameters const& p) {
+ return os << p.pretenure() << ", " << Brief(*p.shared_info());
}
-const LoadNamedParameters& LoadNamedParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
- return OpParameter<LoadNamedParameters>(op);
+const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateClosure, op->opcode());
+ return OpParameter<CreateClosureParameters>(op);
}
-bool operator==(StoreNamedParameters const& lhs,
- StoreNamedParameters const& rhs) {
- return lhs.strict_mode() == rhs.strict_mode() && lhs.name() == rhs.name();
+bool operator==(CreateLiteralParameters const& lhs,
+ CreateLiteralParameters const& rhs) {
+ return lhs.constant().location() == rhs.constant().location() &&
+ lhs.flags() == rhs.flags() && lhs.index() == rhs.index();
}
-bool operator!=(StoreNamedParameters const& lhs,
- StoreNamedParameters const& rhs) {
+bool operator!=(CreateLiteralParameters const& lhs,
+ CreateLiteralParameters const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(StoreNamedParameters const& p) {
- return base::hash_combine(p.strict_mode(), p.name());
+size_t hash_value(CreateLiteralParameters const& p) {
+ return base::hash_combine(p.constant().location(), p.flags(), p.index());
}
-std::ostream& operator<<(std::ostream& os, StoreNamedParameters const& p) {
- return os << p.strict_mode() << ", " << Brief(*p.name().handle());
+std::ostream& operator<<(std::ostream& os, CreateLiteralParameters const& p) {
+ return os << Brief(*p.constant()) << ", " << p.flags() << ", " << p.index();
}
-const StoreNamedParameters& StoreNamedParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSStoreNamed, op->opcode());
- return OpParameter<StoreNamedParameters>(op);
+const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSCreateLiteralArray ||
+ op->opcode() == IrOpcode::kJSCreateLiteralObject ||
+ op->opcode() == IrOpcode::kJSCreateLiteralRegExp);
+ return OpParameter<CreateLiteralParameters>(op);
}
-#define CACHED_OP_LIST(V) \
- V(Equal, Operator::kNoProperties, 2, 1) \
- V(NotEqual, Operator::kNoProperties, 2, 1) \
- V(StrictEqual, Operator::kPure, 2, 1) \
- V(StrictNotEqual, Operator::kPure, 2, 1) \
- V(LessThan, Operator::kNoProperties, 2, 1) \
- V(GreaterThan, Operator::kNoProperties, 2, 1) \
- V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
- V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1) \
- V(BitwiseOr, Operator::kNoProperties, 2, 1) \
- V(BitwiseXor, Operator::kNoProperties, 2, 1) \
- V(BitwiseAnd, Operator::kNoProperties, 2, 1) \
- V(ShiftLeft, Operator::kNoProperties, 2, 1) \
- V(ShiftRight, Operator::kNoProperties, 2, 1) \
- V(ShiftRightLogical, Operator::kNoProperties, 2, 1) \
- V(Add, Operator::kNoProperties, 2, 1) \
- V(Subtract, Operator::kNoProperties, 2, 1) \
- V(Multiply, Operator::kNoProperties, 2, 1) \
- V(Divide, Operator::kNoProperties, 2, 1) \
- V(Modulus, Operator::kNoProperties, 2, 1) \
- V(UnaryNot, Operator::kPure, 1, 1) \
- V(ToBoolean, Operator::kPure, 1, 1) \
- V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToString, Operator::kNoProperties, 1, 1) \
- V(ToName, Operator::kNoProperties, 1, 1) \
- V(ToObject, Operator::kNoProperties, 1, 1) \
- V(Yield, Operator::kNoProperties, 1, 1) \
- V(Create, Operator::kEliminatable, 0, 1) \
- V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(TypeOf, Operator::kPure, 1, 1) \
- V(InstanceOf, Operator::kNoProperties, 2, 1) \
- V(Debugger, Operator::kNoProperties, 0, 0) \
- V(CreateFunctionContext, Operator::kNoProperties, 1, 1) \
- V(CreateWithContext, Operator::kNoProperties, 2, 1) \
- V(CreateBlockContext, Operator::kNoProperties, 2, 1) \
- V(CreateModuleContext, Operator::kNoProperties, 2, 1) \
- V(CreateScriptContext, Operator::kNoProperties, 2, 1)
+#define CACHED_OP_LIST(V) \
+ V(Equal, Operator::kNoProperties, 2, 1) \
+ V(NotEqual, Operator::kNoProperties, 2, 1) \
+ V(StrictEqual, Operator::kNoThrow, 2, 1) \
+ V(StrictNotEqual, Operator::kNoThrow, 2, 1) \
+ V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToString, Operator::kNoProperties, 1, 1) \
+ V(ToName, Operator::kNoProperties, 1, 1) \
+ V(ToObject, Operator::kNoProperties, 1, 1) \
+ V(Yield, Operator::kNoProperties, 1, 1) \
+ V(Create, Operator::kEliminatable, 2, 1) \
+ V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
+ V(HasProperty, Operator::kNoProperties, 2, 1) \
+ V(TypeOf, Operator::kEliminatable, 1, 1) \
+ V(InstanceOf, Operator::kNoProperties, 2, 1) \
+ V(ForInDone, Operator::kPure, 2, 1) \
+ V(ForInNext, Operator::kNoProperties, 4, 1) \
+ V(ForInPrepare, Operator::kNoProperties, 1, 3) \
+ V(ForInStep, Operator::kPure, 1, 1) \
+ V(LoadMessage, Operator::kNoThrow, 0, 1) \
+ V(StoreMessage, Operator::kNoThrow, 1, 0) \
+ V(StackCheck, Operator::kNoProperties, 0, 0) \
+ V(CreateWithContext, Operator::kNoProperties, 2, 1) \
+ V(CreateModuleContext, Operator::kNoProperties, 2, 1)
-struct JSOperatorGlobalCache FINAL {
+#define CACHED_OP_LIST_WITH_LANGUAGE_MODE(V) \
+ V(LessThan, Operator::kNoProperties, 2, 1) \
+ V(GreaterThan, Operator::kNoProperties, 2, 1) \
+ V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
+ V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1)
+
+
+struct JSOperatorGlobalCache final {
#define CACHED(Name, properties, value_input_count, value_output_count) \
- struct Name##Operator FINAL : public Operator { \
+ struct Name##Operator final : public Operator { \
Name##Operator() \
: Operator(IrOpcode::kJS##Name, properties, "JS" #Name, \
value_input_count, Operator::ZeroIfPure(properties), \
- Operator::ZeroIfPure(properties), value_output_count, \
- Operator::ZeroIfPure(properties), 0) {} \
+ Operator::ZeroIfEliminatable(properties), \
+ value_output_count, Operator::ZeroIfPure(properties), \
+ Operator::ZeroIfNoThrow(properties)) {} \
}; \
Name##Operator k##Name##Operator;
CACHED_OP_LIST(CACHED)
#undef CACHED
- template <StrictMode kStrictMode>
- struct StorePropertyOperator FINAL : public Operator1<StrictMode> {
- StorePropertyOperator()
- : Operator1<StrictMode>(IrOpcode::kJSStoreProperty,
- Operator::kNoProperties, "JSStoreProperty", 3,
- 1, 1, 0, 1, 0, kStrictMode) {}
- };
- StorePropertyOperator<SLOPPY> kStorePropertySloppyOperator;
- StorePropertyOperator<STRICT> kStorePropertyStrictOperator;
+
+#define CACHED_WITH_LANGUAGE_MODE(Name, properties, value_input_count, \
+ value_output_count) \
+ template <LanguageMode kLanguageMode> \
+ struct Name##Operator final : public Operator1<LanguageMode> { \
+ Name##Operator() \
+ : Operator1<LanguageMode>( \
+ IrOpcode::kJS##Name, properties, "JS" #Name, value_input_count, \
+ Operator::ZeroIfPure(properties), \
+ Operator::ZeroIfEliminatable(properties), value_output_count, \
+ Operator::ZeroIfPure(properties), \
+ Operator::ZeroIfNoThrow(properties), kLanguageMode) {} \
+ }; \
+ Name##Operator<SLOPPY> k##Name##SloppyOperator; \
+ Name##Operator<STRICT> k##Name##StrictOperator; \
+ Name##Operator<STRONG> k##Name##StrongOperator;
+ CACHED_OP_LIST_WITH_LANGUAGE_MODE(CACHED_WITH_LANGUAGE_MODE)
+#undef CACHED_WITH_LANGUAGE_MODE
};
@@ -288,13 +571,177 @@
#undef CACHED
-const Operator* JSOperatorBuilder::CallFunction(size_t arity,
- CallFunctionFlags flags) {
- CallFunctionParameters parameters(arity, flags);
+#define CACHED_WITH_LANGUAGE_MODE(Name, properties, value_input_count, \
+ value_output_count) \
+ const Operator* JSOperatorBuilder::Name(LanguageMode language_mode) { \
+ switch (language_mode) { \
+ case SLOPPY: \
+ return &cache_.k##Name##SloppyOperator; \
+ case STRICT: \
+ return &cache_.k##Name##StrictOperator; \
+ case STRONG: \
+ return &cache_.k##Name##StrongOperator; \
+ default: \
+ break; /* %*!%^$#@ */ \
+ } \
+ UNREACHABLE(); \
+ return nullptr; \
+ }
+CACHED_OP_LIST_WITH_LANGUAGE_MODE(CACHED_WITH_LANGUAGE_MODE)
+#undef CACHED_WITH_LANGUAGE_MODE
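+// (Illustrative expansion: JSOperatorBuilder::LessThan(STRICT) simply
+// returns &cache_.kLessThanStrictOperator, so the cached comparison
+// operators are handed out without any zone allocation.)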
+
+
+const Operator* JSOperatorBuilder::BitwiseOr(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSBitwiseOr, Operator::kNoProperties, // opcode
+ "JSBitwiseOr", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::BitwiseXor(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSBitwiseXor, Operator::kNoProperties, // opcode
+ "JSBitwiseXor", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::BitwiseAnd(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSBitwiseAnd, Operator::kNoProperties, // opcode
+ "JSBitwiseAnd", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::ShiftLeft(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSShiftLeft, Operator::kNoProperties, // opcode
+ "JSShiftLeft", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::ShiftRight(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSShiftRight, Operator::kNoProperties, // opcode
+ "JSShiftRight", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::ShiftRightLogical(
+ LanguageMode language_mode, BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSShiftRightLogical, Operator::kNoProperties, // opcode
+ "JSShiftRightLogical", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Add(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSAdd, Operator::kNoProperties, // opcode
+ "JSAdd", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Subtract(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSSubtract, Operator::kNoProperties, // opcode
+ "JSSubtract", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Multiply(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSMultiply, Operator::kNoProperties, // opcode
+ "JSMultiply", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Divide(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSDivide, Operator::kNoProperties, // opcode
+ "JSDivide", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Modulus(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSModulus, Operator::kNoProperties, // opcode
+ "JSModulus", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ return new (zone()) Operator1<ToBooleanHints>( //--
+ IrOpcode::kJSToBoolean, Operator::kEliminatable, // opcode
+ "JSToBoolean", // name
+ 1, 1, 0, 1, 1, 0, // inputs/outputs
+ hints); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CallFunction(
+ size_t arity, LanguageMode language_mode, VectorSlotPair const& feedback,
+ ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
+ CallFunctionParameters parameters(arity, language_mode, feedback,
+ tail_call_mode, convert_mode);
return new (zone()) Operator1<CallFunctionParameters>( // --
IrOpcode::kJSCallFunction, Operator::kNoProperties, // opcode
"JSCallFunction", // name
- parameters.arity(), 1, 1, 1, 1, 0, // inputs/outputs
+ parameters.arity(), 1, 1, 1, 1, 2, // inputs/outputs
parameters); // parameter
}
@@ -307,105 +754,257 @@
return new (zone()) Operator1<CallRuntimeParameters>( // --
IrOpcode::kJSCallRuntime, Operator::kNoProperties, // opcode
"JSCallRuntime", // name
- parameters.arity(), 1, 1, f->result_size, 1, 0, // inputs/outputs
+ parameters.arity(), 1, 1, f->result_size, 1, 2, // inputs/outputs
parameters); // parameter
}
-const Operator* JSOperatorBuilder::CallConstruct(int arguments) {
- return new (zone()) Operator1<int>( // --
+const Operator* JSOperatorBuilder::CallConstruct(
+ size_t arity, VectorSlotPair const& feedback) {
+ CallConstructParameters parameters(arity, feedback);
+ return new (zone()) Operator1<CallConstructParameters>( // --
IrOpcode::kJSCallConstruct, Operator::kNoProperties, // opcode
"JSCallConstruct", // name
- arguments, 1, 1, 1, 1, 0, // counts
- arguments); // parameter
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
-const Operator* JSOperatorBuilder::LoadNamed(const Unique<Name>& name,
- const VectorSlotPair& feedback,
- ContextualMode contextual_mode) {
- LoadNamedParameters parameters(name, feedback, contextual_mode);
- return new (zone()) Operator1<LoadNamedParameters>( // --
+const Operator* JSOperatorBuilder::ConvertReceiver(
+ ConvertReceiverMode convert_mode) {
+ return new (zone()) Operator1<ConvertReceiverMode>( // --
+ IrOpcode::kJSConvertReceiver, Operator::kNoThrow, // opcode
+ "JSConvertReceiver", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ convert_mode); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::LoadNamed(LanguageMode language_mode,
+ Handle<Name> name,
+ const VectorSlotPair& feedback) {
+ NamedAccess access(language_mode, name, feedback);
+ return new (zone()) Operator1<NamedAccess>( // --
IrOpcode::kJSLoadNamed, Operator::kNoProperties, // opcode
"JSLoadNamed", // name
- 1, 1, 1, 1, 1, 0, // counts
- parameters); // parameter
+ 2, 1, 1, 1, 1, 2, // counts
+ access); // parameter
}
const Operator* JSOperatorBuilder::LoadProperty(
- const VectorSlotPair& feedback) {
- LoadPropertyParameters parameters(feedback);
- return new (zone()) Operator1<LoadPropertyParameters>( // --
+ LanguageMode language_mode, VectorSlotPair const& feedback) {
+ PropertyAccess access(language_mode, feedback);
+ return new (zone()) Operator1<PropertyAccess>( // --
IrOpcode::kJSLoadProperty, Operator::kNoProperties, // opcode
"JSLoadProperty", // name
- 2, 1, 1, 1, 1, 0, // counts
- parameters); // parameter
+ 3, 1, 1, 1, 1, 2, // counts
+ access); // parameter
}
-const Operator* JSOperatorBuilder::StoreProperty(StrictMode strict_mode) {
- switch (strict_mode) {
- case SLOPPY:
- return &cache_.kStorePropertySloppyOperator;
- case STRICT:
- return &cache_.kStorePropertyStrictOperator;
- }
- UNREACHABLE();
- return nullptr;
-}
-
-
-const Operator* JSOperatorBuilder::StoreNamed(StrictMode strict_mode,
- const Unique<Name>& name) {
- StoreNamedParameters parameters(strict_mode, name);
- return new (zone()) Operator1<StoreNamedParameters>( // --
+const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
+ Handle<Name> name,
+ VectorSlotPair const& feedback) {
+ NamedAccess access(language_mode, name, feedback);
+ return new (zone()) Operator1<NamedAccess>( // --
IrOpcode::kJSStoreNamed, Operator::kNoProperties, // opcode
"JSStoreNamed", // name
- 2, 1, 1, 0, 1, 0, // counts
+ 3, 1, 1, 0, 1, 2, // counts
+ access); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::StoreProperty(
+ LanguageMode language_mode, VectorSlotPair const& feedback) {
+ PropertyAccess access(language_mode, feedback);
+ return new (zone()) Operator1<PropertyAccess>( // --
+ IrOpcode::kJSStoreProperty, Operator::kNoProperties, // opcode
+ "JSStoreProperty", // name
+ 4, 1, 1, 0, 1, 2, // counts
+ access); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
+ return new (zone()) Operator1<LanguageMode>( // --
+ IrOpcode::kJSDeleteProperty, Operator::kNoProperties, // opcode
+ "JSDeleteProperty", // name
+ 2, 1, 1, 1, 1, 2, // counts
+ language_mode); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name,
+ const VectorSlotPair& feedback,
+ TypeofMode typeof_mode) {
+ LoadGlobalParameters parameters(name, feedback, typeof_mode);
+ return new (zone()) Operator1<LoadGlobalParameters>( // --
+ IrOpcode::kJSLoadGlobal, Operator::kNoProperties, // opcode
+ "JSLoadGlobal", // name
+ 1, 1, 1, 1, 1, 2, // counts
parameters); // parameter
}
-const Operator* JSOperatorBuilder::DeleteProperty(StrictMode strict_mode) {
- return new (zone()) Operator1<StrictMode>( // --
- IrOpcode::kJSDeleteProperty, Operator::kNoProperties, // opcode
- "JSDeleteProperty", // name
- 2, 1, 1, 1, 1, 0, // counts
- strict_mode); // parameter
+const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode,
+ const Handle<Name>& name,
+ const VectorSlotPair& feedback) {
+ StoreGlobalParameters parameters(language_mode, feedback, name);
+ return new (zone()) Operator1<StoreGlobalParameters>( // --
+ IrOpcode::kJSStoreGlobal, Operator::kNoProperties, // opcode
+ "JSStoreGlobal", // name
+ 2, 1, 1, 0, 1, 2, // counts
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::LoadContext(size_t depth, size_t index,
bool immutable) {
ContextAccess access(depth, index, immutable);
- return new (zone()) Operator1<ContextAccess>( // --
- IrOpcode::kJSLoadContext, Operator::kNoWrite, // opcode
- "JSLoadContext", // name
- 1, 1, 0, 1, 1, 0, // counts
- access); // parameter
+ return new (zone()) Operator1<ContextAccess>( // --
+ IrOpcode::kJSLoadContext, // opcode
+ Operator::kNoWrite | Operator::kNoThrow, // flags
+ "JSLoadContext", // name
+ 1, 1, 0, 1, 1, 0, // counts
+ access); // parameter
}
const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) {
ContextAccess access(depth, index, false);
- return new (zone()) Operator1<ContextAccess>( // --
- IrOpcode::kJSStoreContext, Operator::kNoRead, // opcode
- "JSStoreContext", // name
- 2, 1, 1, 0, 1, 0, // counts
- access); // parameter
+ return new (zone()) Operator1<ContextAccess>( // --
+ IrOpcode::kJSStoreContext, // opcode
+ Operator::kNoRead | Operator::kNoThrow, // flags
+ "JSStoreContext", // name
+ 2, 1, 1, 0, 1, 0, // counts
+ access); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::LoadDynamic(const Handle<String>& name,
+ TypeofMode typeof_mode) {
+ DynamicAccess access(name, typeof_mode);
+ return new (zone()) Operator1<DynamicAccess>( // --
+ IrOpcode::kJSLoadDynamic, Operator::kNoProperties, // opcode
+ "JSLoadDynamic", // name
+ 2, 1, 1, 1, 1, 2, // counts
+ access); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateArguments(
+ CreateArgumentsParameters::Type type, int start_index) {
+ DCHECK_IMPLIES(start_index, type == CreateArgumentsParameters::kRestArray);
+ CreateArgumentsParameters parameters(type, start_index);
+ return new (zone()) Operator1<CreateArgumentsParameters>( // --
+ IrOpcode::kJSCreateArguments, Operator::kNoThrow, // opcode
+ "JSCreateArguments", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateArray(size_t arity,
+ Handle<AllocationSite> site) {
+ // constructor, new_target, arg1, ..., argN
+ int const value_input_count = static_cast<int>(arity) + 2;
+ CreateArrayParameters parameters(arity, site);
+ return new (zone()) Operator1<CreateArrayParameters>( // --
+ IrOpcode::kJSCreateArray, Operator::kNoProperties, // opcode
+ "JSCreateArray", // name
+ value_input_count, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateClosure(
+ Handle<SharedFunctionInfo> shared_info, PretenureFlag pretenure) {
+ CreateClosureParameters parameters(shared_info, pretenure);
+ return new (zone()) Operator1<CreateClosureParameters>( // --
+ IrOpcode::kJSCreateClosure, Operator::kNoThrow, // opcode
+ "JSCreateClosure", // name
+ 0, 1, 1, 1, 1, 0, // counts
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateLiteralArray(
+ Handle<FixedArray> constant_elements, int literal_flags,
+ int literal_index) {
+ CreateLiteralParameters parameters(constant_elements, literal_flags,
+ literal_index);
+ return new (zone()) Operator1<CreateLiteralParameters>( // --
+ IrOpcode::kJSCreateLiteralArray, Operator::kNoProperties, // opcode
+ "JSCreateLiteralArray", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateLiteralObject(
+ Handle<FixedArray> constant_properties, int literal_flags,
+ int literal_index) {
+ CreateLiteralParameters parameters(constant_properties, literal_flags,
+ literal_index);
+ return new (zone()) Operator1<CreateLiteralParameters>( // --
+ IrOpcode::kJSCreateLiteralObject, Operator::kNoProperties, // opcode
+ "JSCreateLiteralObject", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateLiteralRegExp(
+ Handle<String> constant_pattern, int literal_flags, int literal_index) {
+ CreateLiteralParameters parameters(constant_pattern, literal_flags,
+ literal_index);
+ return new (zone()) Operator1<CreateLiteralParameters>( // --
+ IrOpcode::kJSCreateLiteralRegExp, Operator::kNoProperties, // opcode
+ "JSCreateLiteralRegExp", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kJSCreateFunctionContext, Operator::kNoProperties, // opcode
+ "JSCreateFunctionContext", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ slot_count); // parameter
}
const Operator* JSOperatorBuilder::CreateCatchContext(
- const Unique<String>& name) {
- return new (zone()) Operator1<Unique<String>>( // --
+ const Handle<String>& name) {
+ return new (zone()) Operator1<Handle<String>>( // --
IrOpcode::kJSCreateCatchContext, Operator::kNoProperties, // opcode
"JSCreateCatchContext", // name
- 1, 1, 1, 1, 1, 0, // counts
+ 2, 1, 1, 1, 1, 2, // counts
name); // parameter
}
+
+const Operator* JSOperatorBuilder::CreateBlockContext(
+    const Handle<ScopeInfo>& scope_info) {
+ return new (zone()) Operator1<Handle<ScopeInfo>>( // --
+ IrOpcode::kJSCreateBlockContext, Operator::kNoProperties, // opcode
+ "JSCreateBlockContext", // name
+ 1, 1, 1, 1, 1, 2, // counts
+      scope_info);                                             // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateScriptContext(
+    const Handle<ScopeInfo>& scope_info) {
+ return new (zone()) Operator1<Handle<ScopeInfo>>( // --
+ IrOpcode::kJSCreateScriptContext, Operator::kNoProperties, // opcode
+ "JSCreateScriptContext", // name
+ 1, 1, 1, 1, 1, 2, // counts
+      scope_info);                                             // parameter
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index e716a8e..ca7c7ea 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_JS_OPERATOR_H_
#define V8_COMPILER_JS_OPERATOR_H_
+#include "src/compiler/type-hints.h"
#include "src/runtime/runtime.h"
-#include "src/unique.h"
namespace v8 {
namespace internal {
@@ -17,23 +17,148 @@
struct JSOperatorGlobalCache;
-// Defines the arity and the call flags for a JavaScript function call. This is
-// used as a parameter by JSCallFunction operators.
-class CallFunctionParameters FINAL {
+// Defines a pair of {TypeFeedbackVector} and {FeedbackVectorSlot}, which
+// is used to access the type feedback for a certain {Node}.
+class VectorSlotPair {
public:
- CallFunctionParameters(size_t arity, CallFunctionFlags flags)
- : arity_(arity), flags_(flags) {}
+ VectorSlotPair();
+ VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+ : vector_(vector), slot_(slot) {}
- size_t arity() const { return arity_; }
- CallFunctionFlags flags() const { return flags_; }
+ bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
+
+ Handle<TypeFeedbackVector> vector() const { return vector_; }
+ FeedbackVectorSlot slot() const { return slot_; }
+
+ int index() const;
private:
- const size_t arity_;
- const CallFunctionFlags flags_;
+ const Handle<TypeFeedbackVector> vector_;
+ const FeedbackVectorSlot slot_;
};
-bool operator==(CallFunctionParameters const&, CallFunctionParameters const&);
-bool operator!=(CallFunctionParameters const&, CallFunctionParameters const&);
+bool operator==(VectorSlotPair const&, VectorSlotPair const&);
+bool operator!=(VectorSlotPair const&, VectorSlotPair const&);
+
+size_t hash_value(VectorSlotPair const&);
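+
+// A minimal usage sketch (assuming {vector} and {slot} were obtained from the
+// surrounding function's feedback metadata; the consumer name is illustrative
+// only):
+//
+//   VectorSlotPair pair(vector, slot);
+//   if (pair.IsValid()) RecordFeedbackAt(pair.index());  // hypothetical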
+
+
+// The ConvertReceiverMode is used as a parameter by JSConvertReceiver
+// operators.
+ConvertReceiverMode ConvertReceiverModeOf(Operator const* op);
+
+
+// The ToBooleanHints are used as a parameter by JSToBoolean operators.
+ToBooleanHints ToBooleanHintsOf(Operator const* op);
+
+
+// Defines whether tail call optimization is allowed.
+enum class TailCallMode : unsigned { kAllow, kDisallow };
+
+size_t hash_value(TailCallMode);
+
+std::ostream& operator<<(std::ostream&, TailCallMode);
+
+
+// Defines the language mode and hints for JavaScript binary operations. This
+// is used as a parameter by JSAdd, JSSubtract and similar operators.
+class BinaryOperationParameters final {
+ public:
+ BinaryOperationParameters(LanguageMode language_mode,
+ BinaryOperationHints hints)
+ : language_mode_(language_mode), hints_(hints) {}
+
+ LanguageMode language_mode() const { return language_mode_; }
+ BinaryOperationHints hints() const { return hints_; }
+
+ private:
+ LanguageMode const language_mode_;
+ BinaryOperationHints const hints_;
+};
+
+bool operator==(BinaryOperationParameters const&,
+ BinaryOperationParameters const&);
+bool operator!=(BinaryOperationParameters const&,
+ BinaryOperationParameters const&);
+
+size_t hash_value(BinaryOperationParameters const&);
+
+std::ostream& operator<<(std::ostream&, BinaryOperationParameters const&);
+
+BinaryOperationParameters const& BinaryOperationParametersOf(Operator const*);
+
+
+// Defines the arity and the feedback for a JavaScript constructor call. This is
+// used as a parameter by JSCallConstruct operators.
+class CallConstructParameters final {
+ public:
+ CallConstructParameters(size_t arity, VectorSlotPair const& feedback)
+ : arity_(arity), feedback_(feedback) {}
+
+ size_t arity() const { return arity_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ size_t const arity_;
+ VectorSlotPair const feedback_;
+};
+
+bool operator==(CallConstructParameters const&, CallConstructParameters const&);
+bool operator!=(CallConstructParameters const&, CallConstructParameters const&);
+
+size_t hash_value(CallConstructParameters const&);
+
+std::ostream& operator<<(std::ostream&, CallConstructParameters const&);
+
+CallConstructParameters const& CallConstructParametersOf(Operator const*);
+
+
+// Defines the arity, language mode, feedback, and the receiver-conversion and
+// tail-call modes for a JavaScript function call. This is used as a parameter
+// by JSCallFunction operators.
+class CallFunctionParameters final {
+ public:
+ CallFunctionParameters(size_t arity, LanguageMode language_mode,
+ VectorSlotPair const& feedback,
+ TailCallMode tail_call_mode,
+ ConvertReceiverMode convert_mode)
+ : bit_field_(ArityField::encode(arity) |
+ ConvertReceiverModeField::encode(convert_mode) |
+ LanguageModeField::encode(language_mode) |
+ TailCallModeField::encode(tail_call_mode)),
+ feedback_(feedback) {}
+
+ size_t arity() const { return ArityField::decode(bit_field_); }
+ LanguageMode language_mode() const {
+ return LanguageModeField::decode(bit_field_);
+ }
+ ConvertReceiverMode convert_mode() const {
+ return ConvertReceiverModeField::decode(bit_field_);
+ }
+ TailCallMode tail_call_mode() const {
+ return TailCallModeField::decode(bit_field_);
+ }
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ bool operator==(CallFunctionParameters const& that) const {
+ return this->bit_field_ == that.bit_field_ &&
+ this->feedback_ == that.feedback_;
+ }
+ bool operator!=(CallFunctionParameters const& that) const {
+ return !(*this == that);
+ }
+
+ private:
+ friend size_t hash_value(CallFunctionParameters const& p) {
+ return base::hash_combine(p.bit_field_, p.feedback_);
+ }
+
+ typedef BitField<size_t, 0, 27> ArityField;
+ typedef BitField<ConvertReceiverMode, 27, 2> ConvertReceiverModeField;
+ typedef BitField<LanguageMode, 29, 2> LanguageModeField;
+ typedef BitField<TailCallMode, 31, 1> TailCallModeField;
+
+ const uint32_t bit_field_;
+ const VectorSlotPair feedback_;
+};
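+
+// A sketch of the packing above (values are illustrative): all four scalar
+// parameters share one 32-bit word, so e.g.
+//
+//   CallFunctionParameters p(2, SLOPPY, VectorSlotPair(),
+//                            TailCallMode::kDisallow,
+//                            ConvertReceiverMode::kAny);
+//   DCHECK_EQ(2u, p.arity());              // bits 0..26
+//   DCHECK_EQ(SLOPPY, p.language_mode());  // bits 29..30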
size_t hash_value(CallFunctionParameters const&);
@@ -44,7 +169,7 @@
// Defines the arity and the ID for a runtime function call. This is used as a
// parameter by JSCallRuntime operators.
-class CallRuntimeParameters FINAL {
+class CallRuntimeParameters final {
public:
CallRuntimeParameters(Runtime::FunctionId id, size_t arity)
: id_(id), arity_(arity) {}
@@ -70,7 +195,7 @@
// Defines the location of a context slot relative to a specific scope. This is
// used as a parameter by JSLoadContext and JSStoreContext operators and allows
// accessing a context-allocated variable without keeping track of the scope.
-class ContextAccess FINAL {
+class ContextAccess final {
public:
ContextAccess(size_t depth, size_t index, bool immutable);
@@ -96,106 +221,253 @@
ContextAccess const& ContextAccessOf(Operator const*);
-class VectorSlotPair {
+// Defines the name for a dynamic variable lookup. This is used as a parameter
+// by JSLoadDynamic and JSStoreDynamic operators.
+class DynamicAccess final {
public:
- VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
- : vector_(vector), slot_(slot) {}
+ DynamicAccess(const Handle<String>& name, TypeofMode typeof_mode);
- Handle<TypeFeedbackVector> vector() const { return vector_; }
- FeedbackVectorICSlot slot() const { return slot_; }
-
- int index() const { return vector_->GetIndex(slot_); }
+ const Handle<String>& name() const { return name_; }
+ TypeofMode typeof_mode() const { return typeof_mode_; }
private:
- const Handle<TypeFeedbackVector> vector_;
- const FeedbackVectorICSlot slot_;
+ const Handle<String> name_;
+ const TypeofMode typeof_mode_;
};
+size_t hash_value(DynamicAccess const&);
-bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs);
+bool operator==(DynamicAccess const&, DynamicAccess const&);
+bool operator!=(DynamicAccess const&, DynamicAccess const&);
+
+std::ostream& operator<<(std::ostream&, DynamicAccess const&);
+
+DynamicAccess const& DynamicAccessOf(Operator const*);
+
+
+// Defines the property of an object for a named access. This is
+// used as a parameter by the JSLoadNamed and JSStoreNamed operators.
+class NamedAccess final {
+ public:
+ NamedAccess(LanguageMode language_mode, Handle<Name> name,
+ VectorSlotPair const& feedback)
+ : name_(name), feedback_(feedback), language_mode_(language_mode) {}
+
+ Handle<Name> name() const { return name_; }
+ LanguageMode language_mode() const { return language_mode_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ Handle<Name> const name_;
+ VectorSlotPair const feedback_;
+ LanguageMode const language_mode_;
+};
+
+bool operator==(NamedAccess const&, NamedAccess const&);
+bool operator!=(NamedAccess const&, NamedAccess const&);
+
+size_t hash_value(NamedAccess const&);
+
+std::ostream& operator<<(std::ostream&, NamedAccess const&);
+
+const NamedAccess& NamedAccessOf(const Operator* op);
// Defines the property being loaded from an object by a named load. This is
-// used as a parameter by JSLoadNamed operators.
-class LoadNamedParameters FINAL {
+// used as a parameter by the JSLoadGlobal operator.
+class LoadGlobalParameters final {
public:
- LoadNamedParameters(const Unique<Name>& name, const VectorSlotPair& feedback,
- ContextualMode contextual_mode)
- : name_(name), contextual_mode_(contextual_mode), feedback_(feedback) {}
+ LoadGlobalParameters(const Handle<Name>& name, const VectorSlotPair& feedback,
+ TypeofMode typeof_mode)
+ : name_(name), feedback_(feedback), typeof_mode_(typeof_mode) {}
- const Unique<Name>& name() const { return name_; }
- ContextualMode contextual_mode() const { return contextual_mode_; }
+ const Handle<Name>& name() const { return name_; }
+ TypeofMode typeof_mode() const { return typeof_mode_; }
const VectorSlotPair& feedback() const { return feedback_; }
private:
- const Unique<Name> name_;
- const ContextualMode contextual_mode_;
+ const Handle<Name> name_;
const VectorSlotPair feedback_;
+ const TypeofMode typeof_mode_;
};
-bool operator==(LoadNamedParameters const&, LoadNamedParameters const&);
-bool operator!=(LoadNamedParameters const&, LoadNamedParameters const&);
+bool operator==(LoadGlobalParameters const&, LoadGlobalParameters const&);
+bool operator!=(LoadGlobalParameters const&, LoadGlobalParameters const&);
-size_t hash_value(LoadNamedParameters const&);
+size_t hash_value(LoadGlobalParameters const&);
-std::ostream& operator<<(std::ostream&, LoadNamedParameters const&);
+std::ostream& operator<<(std::ostream&, LoadGlobalParameters const&);
-const LoadNamedParameters& LoadNamedParametersOf(const Operator* op);
-
-
-// Defines the property being loaded from an object. This is
-// used as a parameter by JSLoadProperty operators.
-class LoadPropertyParameters FINAL {
- public:
- explicit LoadPropertyParameters(const VectorSlotPair& feedback)
- : feedback_(feedback) {}
-
- const VectorSlotPair& feedback() const { return feedback_; }
-
- private:
- const VectorSlotPair feedback_;
-};
-
-bool operator==(LoadPropertyParameters const&, LoadPropertyParameters const&);
-bool operator!=(LoadPropertyParameters const&, LoadPropertyParameters const&);
-
-size_t hash_value(LoadPropertyParameters const&);
-
-std::ostream& operator<<(std::ostream&, LoadPropertyParameters const&);
-
-const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op);
+const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op);
// Defines the property being stored to an object by a named store. This is
-// used as a parameter by JSStoreNamed operators.
-class StoreNamedParameters FINAL {
+// used as a parameter by the JSStoreGlobal operator.
+class StoreGlobalParameters final {
public:
- StoreNamedParameters(StrictMode strict_mode, const Unique<Name>& name)
- : strict_mode_(strict_mode), name_(name) {}
+ StoreGlobalParameters(LanguageMode language_mode,
+ const VectorSlotPair& feedback,
+ const Handle<Name>& name)
+ : language_mode_(language_mode), name_(name), feedback_(feedback) {}
- StrictMode strict_mode() const { return strict_mode_; }
- const Unique<Name>& name() const { return name_; }
+ LanguageMode language_mode() const { return language_mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+ const Handle<Name>& name() const { return name_; }
private:
- const StrictMode strict_mode_;
- const Unique<Name> name_;
+ const LanguageMode language_mode_;
+ const Handle<Name> name_;
+ const VectorSlotPair feedback_;
};
-bool operator==(StoreNamedParameters const&, StoreNamedParameters const&);
-bool operator!=(StoreNamedParameters const&, StoreNamedParameters const&);
+bool operator==(StoreGlobalParameters const&, StoreGlobalParameters const&);
+bool operator!=(StoreGlobalParameters const&, StoreGlobalParameters const&);
-size_t hash_value(StoreNamedParameters const&);
+size_t hash_value(StoreGlobalParameters const&);
-std::ostream& operator<<(std::ostream&, StoreNamedParameters const&);
+std::ostream& operator<<(std::ostream&, StoreGlobalParameters const&);
-const StoreNamedParameters& StoreNamedParametersOf(const Operator* op);
+const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op);
+
+
+// Defines the property of an object for a keyed access. This is used
+// as a parameter by the JSLoadProperty and JSStoreProperty operators.
+class PropertyAccess final {
+ public:
+ PropertyAccess(LanguageMode language_mode, VectorSlotPair const& feedback)
+ : feedback_(feedback), language_mode_(language_mode) {}
+
+ LanguageMode language_mode() const { return language_mode_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ VectorSlotPair const feedback_;
+ LanguageMode const language_mode_;
+};
+
+bool operator==(PropertyAccess const&, PropertyAccess const&);
+bool operator!=(PropertyAccess const&, PropertyAccess const&);
+
+size_t hash_value(PropertyAccess const&);
+
+std::ostream& operator<<(std::ostream&, PropertyAccess const&);
+
+PropertyAccess const& PropertyAccessOf(const Operator* op);
+
+
+// Defines specifics about the arguments object or rest parameter creation.
+// This is used as a parameter by JSCreateArguments operators.
+class CreateArgumentsParameters final {
+ public:
+ enum Type { kMappedArguments, kUnmappedArguments, kRestArray };
+ CreateArgumentsParameters(Type type, int start_index)
+ : type_(type), start_index_(start_index) {}
+
+ Type type() const { return type_; }
+ int start_index() const { return start_index_; }
+
+ private:
+ const Type type_;
+ const int start_index_;
+};
+
+bool operator==(CreateArgumentsParameters const&,
+ CreateArgumentsParameters const&);
+bool operator!=(CreateArgumentsParameters const&,
+ CreateArgumentsParameters const&);
+
+size_t hash_value(CreateArgumentsParameters const&);
+
+std::ostream& operator<<(std::ostream&, CreateArgumentsParameters const&);
+
+const CreateArgumentsParameters& CreateArgumentsParametersOf(
+ const Operator* op);
+
+
+// Defines shared information for the array that should be created. This is
+// used as a parameter by JSCreateArray operators.
+class CreateArrayParameters final {
+ public:
+ explicit CreateArrayParameters(size_t arity, Handle<AllocationSite> site)
+ : arity_(arity), site_(site) {}
+
+ size_t arity() const { return arity_; }
+ Handle<AllocationSite> site() const { return site_; }
+
+ private:
+ size_t const arity_;
+ Handle<AllocationSite> const site_;
+};
+
+bool operator==(CreateArrayParameters const&, CreateArrayParameters const&);
+bool operator!=(CreateArrayParameters const&, CreateArrayParameters const&);
+
+size_t hash_value(CreateArrayParameters const&);
+
+std::ostream& operator<<(std::ostream&, CreateArrayParameters const&);
+
+const CreateArrayParameters& CreateArrayParametersOf(const Operator* op);
+
+
+// Defines shared information for the closure that should be created. This is
+// used as a parameter by JSCreateClosure operators.
+class CreateClosureParameters final {
+ public:
+ CreateClosureParameters(Handle<SharedFunctionInfo> shared_info,
+ PretenureFlag pretenure)
+ : shared_info_(shared_info), pretenure_(pretenure) {}
+
+ Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+ PretenureFlag pretenure() const { return pretenure_; }
+
+ private:
+ const Handle<SharedFunctionInfo> shared_info_;
+ const PretenureFlag pretenure_;
+};
+
+bool operator==(CreateClosureParameters const&, CreateClosureParameters const&);
+bool operator!=(CreateClosureParameters const&, CreateClosureParameters const&);
+
+size_t hash_value(CreateClosureParameters const&);
+
+std::ostream& operator<<(std::ostream&, CreateClosureParameters const&);
+
+const CreateClosureParameters& CreateClosureParametersOf(const Operator* op);
+
+
+// Defines shared information for the literal that should be created. This is
+// used as a parameter by JSCreateLiteralArray, JSCreateLiteralObject and
+// JSCreateLiteralRegExp operators.
+class CreateLiteralParameters final {
+ public:
+ CreateLiteralParameters(Handle<HeapObject> constant, int flags, int index)
+ : constant_(constant), flags_(flags), index_(index) {}
+
+ Handle<HeapObject> constant() const { return constant_; }
+ int flags() const { return flags_; }
+ int index() const { return index_; }
+
+ private:
+ Handle<HeapObject> const constant_;
+ int const flags_;
+ int const index_;
+};
+
+bool operator==(CreateLiteralParameters const&, CreateLiteralParameters const&);
+bool operator!=(CreateLiteralParameters const&, CreateLiteralParameters const&);
+
+size_t hash_value(CreateLiteralParameters const&);
+
+std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&);
+
+const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
// graphs.
-class JSOperatorBuilder FINAL : public ZoneObject {
+class JSOperatorBuilder final : public ZoneObject {
public:
explicit JSOperatorBuilder(Zone* zone);
@@ -203,24 +475,33 @@
const Operator* NotEqual();
const Operator* StrictEqual();
const Operator* StrictNotEqual();
- const Operator* LessThan();
- const Operator* GreaterThan();
- const Operator* LessThanOrEqual();
- const Operator* GreaterThanOrEqual();
- const Operator* BitwiseOr();
- const Operator* BitwiseXor();
- const Operator* BitwiseAnd();
- const Operator* ShiftLeft();
- const Operator* ShiftRight();
- const Operator* ShiftRightLogical();
- const Operator* Add();
- const Operator* Subtract();
- const Operator* Multiply();
- const Operator* Divide();
- const Operator* Modulus();
+ const Operator* LessThan(LanguageMode language_mode);
+ const Operator* GreaterThan(LanguageMode language_mode);
+ const Operator* LessThanOrEqual(LanguageMode language_mode);
+ const Operator* GreaterThanOrEqual(LanguageMode language_mode);
+ const Operator* BitwiseOr(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* BitwiseXor(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* BitwiseAnd(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* ShiftLeft(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* ShiftRight(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* ShiftRightLogical(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* Add(LanguageMode language_mode, BinaryOperationHints hints);
+ const Operator* Subtract(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* Multiply(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* Divide(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* Modulus(LanguageMode language_mode,
+ BinaryOperationHints hints);
- const Operator* UnaryNot();
- const Operator* ToBoolean();
+ const Operator* ToBoolean(ToBooleanHints hints);
const Operator* ToNumber();
const Operator* ToString();
const Operator* ToName();
@@ -228,38 +509,75 @@
const Operator* Yield();
const Operator* Create();
+ const Operator* CreateArguments(CreateArgumentsParameters::Type type,
+ int start_index);
+ const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
+ const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
+ PretenureFlag pretenure);
+ const Operator* CreateIterResultObject();
+ const Operator* CreateLiteralArray(Handle<FixedArray> constant_elements,
+ int literal_flags, int literal_index);
+ const Operator* CreateLiteralObject(Handle<FixedArray> constant_properties,
+ int literal_flags, int literal_index);
+ const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
+ int literal_flags, int literal_index);
- const Operator* CallFunction(size_t arity, CallFunctionFlags flags);
+ const Operator* CallFunction(
+ size_t arity, LanguageMode language_mode,
+ VectorSlotPair const& feedback = VectorSlotPair(),
+ ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
+ const Operator* CallConstruct(size_t arity, VectorSlotPair const& feedback);
- const Operator* CallConstruct(int arguments);
+ const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
- const Operator* LoadProperty(const VectorSlotPair& feedback);
- const Operator* LoadNamed(const Unique<Name>& name,
- const VectorSlotPair& feedback,
- ContextualMode contextual_mode = NOT_CONTEXTUAL);
+ const Operator* LoadProperty(LanguageMode language_mode,
+ VectorSlotPair const& feedback);
+ const Operator* LoadNamed(LanguageMode language_mode, Handle<Name> name,
+ VectorSlotPair const& feedback);
- const Operator* StoreProperty(StrictMode strict_mode);
- const Operator* StoreNamed(StrictMode strict_mode, const Unique<Name>& name);
+ const Operator* StoreProperty(LanguageMode language_mode,
+ VectorSlotPair const& feedback);
+ const Operator* StoreNamed(LanguageMode language_mode, Handle<Name> name,
+ VectorSlotPair const& feedback);
- const Operator* DeleteProperty(StrictMode strict_mode);
+ const Operator* DeleteProperty(LanguageMode language_mode);
const Operator* HasProperty();
+ const Operator* LoadGlobal(const Handle<Name>& name,
+ const VectorSlotPair& feedback,
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+ const Operator* StoreGlobal(LanguageMode language_mode,
+ const Handle<Name>& name,
+ const VectorSlotPair& feedback);
+
const Operator* LoadContext(size_t depth, size_t index, bool immutable);
const Operator* StoreContext(size_t depth, size_t index);
+ const Operator* LoadDynamic(const Handle<String>& name,
+ TypeofMode typeof_mode);
+
const Operator* TypeOf();
const Operator* InstanceOf();
- const Operator* Debugger();
- // TODO(titzer): nail down the static parts of each of these context flavors.
- const Operator* CreateFunctionContext();
- const Operator* CreateCatchContext(const Unique<String>& name);
+ const Operator* ForInDone();
+ const Operator* ForInNext();
+ const Operator* ForInPrepare();
+ const Operator* ForInStep();
+
+ const Operator* LoadMessage();
+ const Operator* StoreMessage();
+
+ const Operator* StackCheck();
+
+ const Operator* CreateFunctionContext(int slot_count);
+ const Operator* CreateCatchContext(const Handle<String>& name);
const Operator* CreateWithContext();
- const Operator* CreateBlockContext();
+  const Operator* CreateBlockContext(const Handle<ScopeInfo>& scope_info);
const Operator* CreateModuleContext();
- const Operator* CreateScriptContext();
+  const Operator* CreateScriptContext(const Handle<ScopeInfo>& scope_info);
private:
Zone* zone() const { return zone_; }
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index 7618375..5e0712a 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -2,82 +2,143 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-typed-lowering.h"
-#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/state-values-utils.h"
+#include "src/type-cache.h"
#include "src/types.h"
namespace v8 {
namespace internal {
namespace compiler {
-// TODO(turbofan): js-typed-lowering improvements possible
-// - immediately put in type bounds for all new nodes
-// - relax effects from generic but not-side-effecting operations
+namespace {
+// A helper class to construct inline allocations on the simplified operator
+// level. This keeps track of the effect chain for initial stores on a newly
+// allocated object and also provides helpers for commonly allocated objects.
+class AllocationBuilder final {
+ public:
+ AllocationBuilder(JSGraph* jsgraph, Node* effect, Node* control)
+ : jsgraph_(jsgraph),
+ allocation_(nullptr),
+ effect_(effect),
+ control_(control) {}
-// Relax the effects of {node} by immediately replacing effect uses of {node}
-// with the effect input to {node}.
-// TODO(turbofan): replace the effect input to {node} with {graph->start()}.
-// TODO(titzer): move into a GraphEditor?
-static void RelaxEffects(Node* node) {
- NodeProperties::ReplaceWithValue(node, node, NULL);
-}
-
-
-JSTypedLowering::JSTypedLowering(JSGraph* jsgraph, Zone* zone)
- : jsgraph_(jsgraph), simplified_(graph()->zone()), conversions_(zone) {
- Handle<Object> zero = factory()->NewNumber(0.0);
- Handle<Object> one = factory()->NewNumber(1.0);
- zero_range_ = Type::Range(zero, zero, graph()->zone());
- one_range_ = Type::Range(one, one, graph()->zone());
- Handle<Object> thirtyone = factory()->NewNumber(31.0);
- zero_thirtyone_range_ = Type::Range(zero, thirtyone, graph()->zone());
- // TODO(jarin): Can we have a correctification of the stupid type system?
- // These stupid work-arounds are just stupid!
- shifted_int32_ranges_[0] = Type::Signed32();
- if (SmiValuesAre31Bits()) {
- shifted_int32_ranges_[1] = Type::SignedSmall();
- for (size_t k = 2; k < arraysize(shifted_int32_ranges_); ++k) {
- Handle<Object> min = factory()->NewNumber(kMinInt / (1 << k));
- Handle<Object> max = factory()->NewNumber(kMaxInt / (1 << k));
- shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
- }
- } else {
- for (size_t k = 1; k < arraysize(shifted_int32_ranges_); ++k) {
- Handle<Object> min = factory()->NewNumber(kMinInt / (1 << k));
- Handle<Object> max = factory()->NewNumber(kMaxInt / (1 << k));
- shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
- }
+ // Primitive allocation of static size.
+ void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
+ effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
+ allocation_ =
+ graph()->NewNode(simplified()->Allocate(pretenure),
+ jsgraph()->Constant(size), effect_, control_);
+ effect_ = allocation_;
}
-}
+ // Primitive store into a field.
+ void Store(const FieldAccess& access, Node* value) {
+ effect_ = graph()->NewNode(simplified()->StoreField(access), allocation_,
+ value, effect_, control_);
+ }
-Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
- NodeProperties::ReplaceWithValue(old, node, node);
- return Changed(node);
-}
+ // Primitive store into an element.
+ void Store(ElementAccess const& access, Node* index, Node* value) {
+ effect_ = graph()->NewNode(simplified()->StoreElement(access), allocation_,
+ index, value, effect_, control_);
+ }
+
+ // Compound allocation of a FixedArray.
+ void AllocateArray(int length, Handle<Map> map,
+ PretenureFlag pretenure = NOT_TENURED) {
+ DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
+ map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+ int size = (map->instance_type() == FIXED_ARRAY_TYPE)
+ ? FixedArray::SizeFor(length)
+ : FixedDoubleArray::SizeFor(length);
+ Allocate(size, pretenure);
+ Store(AccessBuilder::ForMap(), map);
+ Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
+ }
+
+ // Compound store of a constant into a field.
+ void Store(const FieldAccess& access, Handle<Object> value) {
+ Store(access, jsgraph()->Constant(value));
+ }
+
+ void FinishAndChange(Node* node) {
+ NodeProperties::SetType(allocation_, NodeProperties::GetType(node));
+ node->ReplaceInput(0, allocation_);
+ node->ReplaceInput(1, effect_);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, common()->FinishRegion());
+ }
+
+ Node* Finish() {
+ return graph()->NewNode(common()->FinishRegion(), allocation_, effect_);
+ }
+
+ protected:
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph() { return jsgraph_->graph(); }
+ CommonOperatorBuilder* common() { return jsgraph_->common(); }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
+
+ private:
+ JSGraph* const jsgraph_;
+ Node* allocation_;
+ Node* effect_;
+ Node* control_;
+};
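+
+// A minimal usage sketch (index/value nodes and the element accessor are
+// placeholders): allocate a two-element FixedArray and splice it into
+// {node}'s value and effect chains:
+//
+//   AllocationBuilder a(jsgraph, effect, control);
+//   a.AllocateArray(2, factory()->fixed_array_map());
+//   a.Store(AccessBuilder::ForFixedArrayElement(), index, value);
+//   a.FinishAndChange(node);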
+
+} // namespace
// A helper class to simplify the process of reducing a single binop node with a
// JSOperator. This class manages the rewriting of context, control, and effect
// dependencies during lowering of a binop and contains numerous helper
// functions for matching the types of inputs to an operation.
-class JSBinopReduction FINAL {
+class JSBinopReduction final {
public:
JSBinopReduction(JSTypedLowering* lowering, Node* node)
- : lowering_(lowering),
- node_(node),
- left_type_(NodeProperties::GetBounds(node->InputAt(0)).upper),
- right_type_(NodeProperties::GetBounds(node->InputAt(1)).upper) {}
+ : lowering_(lowering), node_(node) {}
- void ConvertInputsToNumber() {
- node_->ReplaceInput(0, ConvertToNumber(left()));
- node_->ReplaceInput(1, ConvertToNumber(right()));
+ void ConvertInputsToNumber(Node* frame_state) {
+ // To convert the inputs to numbers, we have to provide frame states
+ // for lazy bailouts in the ToNumber conversions.
+ // We use a little hack here: we take the frame state before the binary
+ // operation and use it to construct the frame states for the conversion
+    // so that after the deoptimization, the binary operation IC gets the
+    // already-converted values from full code. This way we are sure that we
+    // will not re-do any of the side effects.
+
+ Node* left_input = nullptr;
+ Node* right_input = nullptr;
+ bool left_is_primitive = left_type()->Is(Type::PlainPrimitive());
+ bool right_is_primitive = right_type()->Is(Type::PlainPrimitive());
+ bool handles_exception = NodeProperties::IsExceptionalCall(node_);
+
+ if (!left_is_primitive && !right_is_primitive && handles_exception) {
+ ConvertBothInputsToNumber(&left_input, &right_input, frame_state);
+ } else {
+ left_input = left_is_primitive
+ ? ConvertPlainPrimitiveToNumber(left())
+ : ConvertSingleInputToNumber(
+ left(), CreateFrameStateForLeftInput(frame_state));
+ right_input = right_is_primitive
+ ? ConvertPlainPrimitiveToNumber(right())
+ : ConvertSingleInputToNumber(
+ right(), CreateFrameStateForRightInput(
+ frame_state, left_input));
+ }
+
+ node_->ReplaceInput(0, left_input);
+ node_->ReplaceInput(1, right_input);
}
void ConvertInputsToUI32(Signedness left_signedness,
@@ -86,29 +147,11 @@
node_->ReplaceInput(1, ConvertToUI32(right(), right_signedness));
}
- void ConvertInputsToString() {
- node_->ReplaceInput(0, ConvertToString(left()));
- node_->ReplaceInput(1, ConvertToString(right()));
- }
-
- // Convert inputs for bitwise shift operation (ES5 spec 11.7).
- void ConvertInputsForShift(Signedness left_signedness) {
- node_->ReplaceInput(0, ConvertToUI32(left(), left_signedness));
- Node* rnum = ConvertToUI32(right(), kUnsigned);
- Type* rnum_type = NodeProperties::GetBounds(rnum).upper;
- if (!rnum_type->Is(lowering_->zero_thirtyone_range_)) {
- rnum = graph()->NewNode(machine()->Word32And(), rnum,
- jsgraph()->Int32Constant(0x1F));
- }
- node_->ReplaceInput(1, rnum);
- }
-
void SwapInputs() {
Node* l = left();
Node* r = right();
node_->ReplaceInput(0, r);
node_->ReplaceInput(1, l);
- std::swap(left_type_, right_type_);
}
// Remove all effect and control inputs and outputs to this node and change
@@ -120,19 +163,19 @@
DCHECK_EQ(0, op->ControlInputCount());
DCHECK_EQ(2, op->ValueInputCount());
- // Remove the effects from the node, if any, and update its effect usages.
+ // Remove the effects from the node, and update its effect/control usages.
if (node_->op()->EffectInputCount() > 0) {
- RelaxEffects(node_);
+ lowering_->RelaxEffectsAndControls(node_);
}
// Remove the inputs corresponding to context, effect, and control.
NodeProperties::RemoveNonValueInputs(node_);
// Finally, update the operator to the new one.
- node_->set_op(op);
+ NodeProperties::ChangeOp(node_, op);
// TODO(jarin): Replace the explicit typing hack with a call to some method
// that encapsulates changing the operator and re-typing.
- Bounds const bounds = NodeProperties::GetBounds(node_);
- NodeProperties::SetBounds(node_, Bounds::NarrowUpper(bounds, type, zone()));
+ Type* node_type = NodeProperties::GetType(node_);
+ NodeProperties::SetType(node_, Type::Intersect(node_type, type, zone()));
if (invert) {
      // Insert a boolean-not to invert the value.
@@ -145,22 +188,61 @@
return lowering_->Changed(node_);
}
+ Reduction ChangeToStringComparisonOperator(const Operator* op,
+ bool invert = false) {
+ if (node_->op()->ControlInputCount() > 0) {
+ lowering_->RelaxControls(node_);
+ }
+ // String comparison operators need effect and control inputs, so copy them
+ // over.
+ Node* effect = NodeProperties::GetEffectInput(node_);
+ Node* control = NodeProperties::GetControlInput(node_);
+ node_->ReplaceInput(2, effect);
+ node_->ReplaceInput(3, control);
+
+ node_->TrimInputCount(4);
+ NodeProperties::ChangeOp(node_, op);
+
+ if (invert) {
+ // Insert a boolean-not to invert the value.
+ Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
+ node_->ReplaceUses(value);
+ // Note: ReplaceUses() smashes all uses, so smash it back here.
+ value->ReplaceInput(0, node_);
+ return lowering_->Replace(value);
+ }
+ return lowering_->Changed(node_);
+ }
+
Reduction ChangeToPureOperator(const Operator* op, Type* type) {
return ChangeToPureOperator(op, false, type);
}
- bool OneInputIs(Type* t) { return left_type_->Is(t) || right_type_->Is(t); }
-
- bool BothInputsAre(Type* t) {
- return left_type_->Is(t) && right_type_->Is(t);
+ // TODO(turbofan): Strong mode should be killed soonish!
+ bool IsStrong() const {
+ if (node_->opcode() == IrOpcode::kJSLessThan ||
+ node_->opcode() == IrOpcode::kJSLessThanOrEqual ||
+ node_->opcode() == IrOpcode::kJSGreaterThan ||
+ node_->opcode() == IrOpcode::kJSGreaterThanOrEqual) {
+ return is_strong(OpParameter<LanguageMode>(node_));
+ }
+ return is_strong(BinaryOperationParametersOf(node_->op()).language_mode());
}
+ bool LeftInputIs(Type* t) { return left_type()->Is(t); }
+
+ bool RightInputIs(Type* t) { return right_type()->Is(t); }
+
+ bool OneInputIs(Type* t) { return LeftInputIs(t) || RightInputIs(t); }
+
+ bool BothInputsAre(Type* t) { return LeftInputIs(t) && RightInputIs(t); }
+
bool OneInputCannotBe(Type* t) {
- return !left_type_->Maybe(t) || !right_type_->Maybe(t);
+ return !left_type()->Maybe(t) || !right_type()->Maybe(t);
}
bool NeitherInputCanBe(Type* t) {
- return !left_type_->Maybe(t) && !right_type_->Maybe(t);
+ return !left_type()->Maybe(t) && !right_type()->Maybe(t);
}
Node* effect() { return NodeProperties::GetEffectInput(node_); }
@@ -168,46 +250,161 @@
Node* context() { return NodeProperties::GetContextInput(node_); }
Node* left() { return NodeProperties::GetValueInput(node_, 0); }
Node* right() { return NodeProperties::GetValueInput(node_, 1); }
- Type* left_type() { return left_type_; }
- Type* right_type() { return right_type_; }
+ Type* left_type() { return NodeProperties::GetType(node_->InputAt(0)); }
+ Type* right_type() { return NodeProperties::GetType(node_->InputAt(1)); }
SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
Graph* graph() const { return lowering_->graph(); }
JSGraph* jsgraph() { return lowering_->jsgraph(); }
JSOperatorBuilder* javascript() { return lowering_->javascript(); }
MachineOperatorBuilder* machine() { return lowering_->machine(); }
+ CommonOperatorBuilder* common() { return jsgraph()->common(); }
Zone* zone() const { return graph()->zone(); }
private:
JSTypedLowering* lowering_; // The containing lowering instance.
Node* node_; // The original node.
- Type* left_type_; // Cache of the left input's type.
- Type* right_type_; // Cache of the right input's type.
- Node* ConvertToString(Node* node) {
- // Avoid introducing too many eager ToString() operations.
- Reduction reduced = lowering_->ReduceJSToStringInput(node);
- if (reduced.Changed()) return reduced.replacement();
- Node* n = graph()->NewNode(javascript()->ToString(), node, context(),
- effect(), control());
+ Node* CreateFrameStateForLeftInput(Node* frame_state) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+
+ if (state_info.bailout_id() == BailoutId::None()) {
+ // Dummy frame state => just leave it as is.
+ return frame_state;
+ }
+
+ // If the frame state is already the right one, just return it.
+ if (state_info.state_combine().kind() == OutputFrameStateCombine::kPokeAt &&
+ state_info.state_combine().GetOffsetToPokeAt() == 1) {
+ return frame_state;
+ }
+
+ // Here, we smash the result of the conversion into the slot just below
+ // the stack top. This is the slot that full code uses to store the
+ // left operand.
+ const Operator* op = jsgraph()->common()->FrameState(
+ state_info.bailout_id(), OutputFrameStateCombine::PokeAt(1),
+ state_info.function_info());
+
+ return graph()->NewNode(op,
+ frame_state->InputAt(kFrameStateParametersInput),
+ frame_state->InputAt(kFrameStateLocalsInput),
+ frame_state->InputAt(kFrameStateStackInput),
+ frame_state->InputAt(kFrameStateContextInput),
+ frame_state->InputAt(kFrameStateFunctionInput),
+ frame_state->InputAt(kFrameStateOuterStateInput));
+ }
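+
+  // To illustrate the combines used above and below: if full code's
+  // expression stack at the bailout is [..., left, right] (right on top),
+  // then PokeAt(1) overwrites the {left} slot with the deopt input value,
+  // while PokeAt(0) overwrites the {right} slot.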
+
+ Node* CreateFrameStateForRightInput(Node* frame_state, Node* converted_left) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+
+ if (state_info.bailout_id() == BailoutId::None()) {
+ // Dummy frame state => just leave it as is.
+ return frame_state;
+ }
+
+ // Create a frame state that stores the result of the operation to the
+ // top of the stack (i.e., the slot used for the right operand).
+ const Operator* op = jsgraph()->common()->FrameState(
+ state_info.bailout_id(), OutputFrameStateCombine::PokeAt(0),
+ state_info.function_info());
+
+ // Change the left operand {converted_left} on the expression stack.
+ Node* stack = frame_state->InputAt(2);
+ DCHECK_EQ(stack->opcode(), IrOpcode::kStateValues);
+ DCHECK_GE(stack->InputCount(), 2);
+
+ // TODO(jarin) Allocate in a local zone or a reusable buffer.
+ NodeVector new_values(stack->InputCount(), zone());
+ for (int i = 0; i < stack->InputCount(); i++) {
+ if (i == stack->InputCount() - 2) {
+ new_values[i] = converted_left;
+ } else {
+ new_values[i] = stack->InputAt(i);
+ }
+ }
+ Node* new_stack =
+ graph()->NewNode(stack->op(), stack->InputCount(), &new_values.front());
+
+ return graph()->NewNode(
+ op, frame_state->InputAt(kFrameStateParametersInput),
+ frame_state->InputAt(kFrameStateLocalsInput), new_stack,
+ frame_state->InputAt(kFrameStateContextInput),
+ frame_state->InputAt(kFrameStateFunctionInput),
+ frame_state->InputAt(kFrameStateOuterStateInput));
+ }
+
+ Node* ConvertPlainPrimitiveToNumber(Node* node) {
+ DCHECK(NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
+ // Avoid inserting too many eager ToNumber() operations.
+ Reduction const reduction = lowering_->ReduceJSToNumberInput(node);
+ if (reduction.Changed()) return reduction.replacement();
+ // TODO(jarin) Use PlainPrimitiveToNumber once we have it.
+ return graph()->NewNode(
+ javascript()->ToNumber(), node, jsgraph()->NoContextConstant(),
+ jsgraph()->EmptyFrameState(), graph()->start(), graph()->start());
+ }
+
+ Node* ConvertSingleInputToNumber(Node* node, Node* frame_state) {
+ DCHECK(!NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
+ Node* const n = graph()->NewNode(javascript()->ToNumber(), node, context(),
+ frame_state, effect(), control());
+ NodeProperties::ReplaceUses(node_, node_, node_, n, n);
update_effect(n);
return n;
}
- Node* ConvertToNumber(Node* node) {
- if (NodeProperties::GetBounds(node).upper->Is(Type::PlainPrimitive())) {
- return lowering_->ConvertToNumber(node);
+ void ConvertBothInputsToNumber(Node** left_result, Node** right_result,
+ Node* frame_state) {
+ Node* projections[2];
+
+ // Find {IfSuccess} and {IfException} continuations of the operation.
+ NodeProperties::CollectControlProjections(node_, projections, 2);
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(projections[1]);
+ Node* if_exception = projections[1];
+ Node* if_success = projections[0];
+
+ // Insert two ToNumber() operations that both potentially throw.
+ Node* left_state = CreateFrameStateForLeftInput(frame_state);
+ Node* left_conv =
+ graph()->NewNode(javascript()->ToNumber(), left(), context(),
+ left_state, effect(), control());
+ Node* left_success = graph()->NewNode(common()->IfSuccess(), left_conv);
+ Node* right_state = CreateFrameStateForRightInput(frame_state, left_conv);
+ Node* right_conv =
+ graph()->NewNode(javascript()->ToNumber(), right(), context(),
+ right_state, left_conv, left_success);
+ Node* left_exception =
+ graph()->NewNode(common()->IfException(hint), left_conv, left_conv);
+ Node* right_exception =
+ graph()->NewNode(common()->IfException(hint), right_conv, right_conv);
+ NodeProperties::ReplaceControlInput(if_success, right_conv);
+ update_effect(right_conv);
+
+ // Wire conversions to existing {IfException} continuation.
+ Node* exception_merge = if_exception;
+ Node* exception_value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ left_exception, right_exception, exception_merge);
+ Node* exception_effect =
+ graph()->NewNode(common()->EffectPhi(2), left_exception,
+ right_exception, exception_merge);
+ for (Edge edge : exception_merge->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) edge.UpdateTo(exception_effect);
+ if (NodeProperties::IsValueEdge(edge)) edge.UpdateTo(exception_value);
}
- Node* n = graph()->NewNode(javascript()->ToNumber(), node, context(),
- effect(), control());
- update_effect(n);
- return n;
+ NodeProperties::RemoveType(exception_merge);
+ exception_merge->ReplaceInput(0, left_exception);
+ exception_merge->ReplaceInput(1, right_exception);
+ NodeProperties::ChangeOp(exception_merge, common()->Merge(2));
+
+ *left_result = left_conv;
+ *right_result = right_conv;
}
Node* ConvertToUI32(Node* node, Signedness signedness) {
// Avoid introducing too many eager NumberToXXnt32() operations.
- node = ConvertToNumber(node);
- Type* type = NodeProperties::GetBounds(node).upper;
+ Type* type = NodeProperties::GetType(node);
if (signedness == kSigned) {
if (!type->Is(Type::Signed32())) {
node = graph()->NewNode(simplified()->NumberToInt32(), node);
@@ -227,127 +424,134 @@
};
+// TODO(turbofan): js-typed-lowering improvements possible
+// - immediately put in type bounds for all new nodes
+// - relax effects from generic but not-side-effecting operations
+
+
+JSTypedLowering::JSTypedLowering(Editor* editor,
+ CompilationDependencies* dependencies,
+ Flags flags, JSGraph* jsgraph, Zone* zone)
+ : AdvancedReducer(editor),
+ dependencies_(dependencies),
+ flags_(flags),
+ jsgraph_(jsgraph),
+ true_type_(Type::Constant(factory()->true_value(), graph()->zone())),
+ false_type_(Type::Constant(factory()->false_value(), graph()->zone())),
+ the_hole_type_(
+ Type::Constant(factory()->the_hole_value(), graph()->zone())),
+ type_cache_(TypeCache::Get()) {
+ for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
+ double min = kMinInt / (1 << k);
+ double max = kMaxInt / (1 << k);
+ shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
+ }
+}
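+
+// Note: shifted_int32_ranges_[k] is Range(kMinInt / 2^k, kMaxInt / 2^k), i.e.
+// the set of values whose k-bit left shift still fits into int32.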
+
+
Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Number())) {
// JSAdd(x:number, y:number) => NumberAdd(x, y)
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
- if (r.BothInputsAre(Type::Primitive()) &&
- r.NeitherInputCanBe(Type::StringOrReceiver())) {
+ if (r.NeitherInputCanBe(Type::StringOrReceiver()) && !r.IsStrong()) {
// JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
- r.ConvertInputsToNumber();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ r.ConvertInputsToNumber(frame_state);
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
-#if 0
- // TODO(turbofan): General ToNumber disabled for now because:
- // a) The inserted ToNumber operation screws up observability of valueOf.
- // b) Deoptimization at ToNumber doesn't have corresponding bailout id.
- Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
- if (r.NeitherInputCanBe(maybe_string)) {
- ...
- }
-#endif
-#if 0
- // TODO(turbofan): Lowering of StringAdd is disabled for now because:
- // a) The inserted ToString operation screws up valueOf vs. toString order.
- // b) Deoptimization at ToString doesn't have corresponding bailout id.
- // c) Our current StringAddStub is actually non-pure and requires context.
- if (r.OneInputIs(Type::String())) {
- // JSAdd(x:string, y:string) => StringAdd(x, y)
- // JSAdd(x:string, y) => StringAdd(x, ToString(y))
- // JSAdd(x, y:string) => StringAdd(ToString(x), y)
- r.ConvertInputsToString();
- return r.ChangeToPureOperator(simplified()->StringAdd());
- }
-#endif
- return NoChange();
-}
-
-
-Reduction JSTypedLowering::ReduceJSBitwiseOr(Node* node) {
- JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Primitive()) || r.OneInputIs(zero_range_)) {
- // TODO(jarin): Propagate frame state input from non-primitive input node to
- // JSToNumber node.
- // TODO(titzer): some Smi bitwise operations don't really require going
- // all the way to int32, which can save tagging/untagging for some
- // operations
- // on some platforms.
- // TODO(turbofan): make this heuristic configurable for code size.
- r.ConvertInputsToUI32(kSigned, kSigned);
- return r.ChangeToPureOperator(machine()->Word32Or(), Type::Integral32());
+ if (r.BothInputsAre(Type::String())) {
+ // JSAdd(x:string, y:string) => CallStub[StringAdd](x, y)
+ Callable const callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node->op()));
+ node->RemoveInput(NodeProperties::FirstFrameStateIndex(node) + 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
}
return NoChange();
}
-Reduction JSTypedLowering::ReduceJSMultiply(Node* node) {
+Reduction JSTypedLowering::ReduceJSModulus(Node* node) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Primitive()) || r.OneInputIs(one_range_)) {
- // TODO(jarin): Propagate frame state input from non-primitive input node to
- // JSToNumber node.
- r.ConvertInputsToNumber();
- return r.ChangeToPureOperator(simplified()->NumberMultiply(),
+ if (r.BothInputsAre(Type::Number())) {
+    // JSModulus(x:number, y:number) => NumberModulus(x, y)
+ return r.ChangeToPureOperator(simplified()->NumberModulus(),
Type::Number());
}
- // TODO(turbofan): relax/remove the effects of this operator in other cases.
return NoChange();
}
Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
const Operator* numberOp) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Primitive())) {
- r.ConvertInputsToNumber();
- return r.ChangeToPureOperator(numberOp, Type::Number());
+ if (r.IsStrong() || numberOp == simplified()->NumberModulus()) {
+ if (r.BothInputsAre(Type::Number())) {
+ return r.ChangeToPureOperator(numberOp, Type::Number());
+ }
+ return NoChange();
}
-#if 0
- // TODO(turbofan): General ToNumber disabled for now because:
- // a) The inserted ToNumber operation screws up observability of valueOf.
- // b) Deoptimization at ToNumber doesn't have corresponding bailout id.
- if (r.OneInputIs(Type::Primitive())) {
- // If at least one input is a primitive, then insert appropriate conversions
- // to number and reduce this operator to the given numeric one.
- // TODO(turbofan): make this heuristic configurable for code size.
- r.ConvertInputsToNumber();
- return r.ChangeToPureOperator(numberOp);
- }
-#endif
- // TODO(turbofan): relax/remove the effects of this operator in other cases.
- return NoChange();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ r.ConvertInputsToNumber(frame_state);
+ return r.ChangeToPureOperator(numberOp, Type::Number());
}
Reduction JSTypedLowering::ReduceInt32Binop(Node* node, const Operator* intOp) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Primitive())) {
- // TODO(titzer): some Smi bitwise operations don't really require going
- // all the way to int32, which can save tagging/untagging for some
- // operations
- // on some platforms.
- // TODO(turbofan): make this heuristic configurable for code size.
- r.ConvertInputsToUI32(kSigned, kSigned);
- return r.ChangeToPureOperator(intOp, Type::Integral32());
+ if (r.IsStrong()) {
+ if (r.BothInputsAre(Type::Number())) {
+ r.ConvertInputsToUI32(kSigned, kSigned);
+ return r.ChangeToPureOperator(intOp, Type::Integral32());
+ }
+ return NoChange();
}
- return NoChange();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ r.ConvertInputsToNumber(frame_state);
+ r.ConvertInputsToUI32(kSigned, kSigned);
+ return r.ChangeToPureOperator(intOp, Type::Integral32());
}
Reduction JSTypedLowering::ReduceUI32Shift(Node* node,
Signedness left_signedness,
const Operator* shift_op) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Primitive())) {
- r.ConvertInputsForShift(left_signedness);
- return r.ChangeToPureOperator(shift_op, Type::Integral32());
+ if (r.IsStrong()) {
+ if (r.BothInputsAre(Type::Number())) {
+ r.ConvertInputsToUI32(left_signedness, kUnsigned);
+ return r.ChangeToPureOperator(shift_op);
+ }
+ return NoChange();
}
- return NoChange();
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ r.ConvertInputsToNumber(frame_state);
+ r.ConvertInputsToUI32(left_signedness, kUnsigned);
+ return r.ChangeToPureOperator(shift_op);
}
Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::String())) {
// If both inputs are definitely strings, perform a string comparison.
@@ -370,20 +574,10 @@
default:
return NoChange();
}
- return r.ChangeToPureOperator(stringOp);
+ r.ChangeToStringComparisonOperator(stringOp);
+ return Changed(node);
}
-#if 0
- // TODO(turbofan): General ToNumber disabled for now because:
- // a) The inserted ToNumber operation screws up observability of valueOf.
- // b) Deoptimization at ToNumber doesn't have corresponding bailout id.
- Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
- if (r.OneInputCannotBe(maybe_string)) {
- // If one input cannot be a string, then emit a number comparison.
- ...
- }
-#endif
- if (r.BothInputsAre(Type::Primitive()) &&
- r.OneInputCannotBe(Type::StringOrReceiver())) {
+ if (r.OneInputCannotBe(Type::StringOrReceiver())) {
const Operator* less_than;
const Operator* less_than_or_equal;
if (r.BothInputsAre(Type::Unsigned32())) {
@@ -394,7 +588,11 @@
less_than_or_equal = machine()->Int32LessThanOrEqual();
} else {
// TODO(turbofan): mixed signed/unsigned int32 comparisons.
- r.ConvertInputsToNumber();
+ if (r.IsStrong() && !r.BothInputsAre(Type::Number())) {
+ return NoChange();
+ }
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ r.ConvertInputsToNumber(frame_state);
less_than = simplified()->NumberLessThan();
less_than_or_equal = simplified()->NumberLessThanOrEqual();
}
@@ -425,40 +623,73 @@
Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Number())) {
return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
}
if (r.BothInputsAre(Type::String())) {
- return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+ return r.ChangeToStringComparisonOperator(simplified()->StringEqual(),
+ invert);
+ }
+ if (r.BothInputsAre(Type::Boolean())) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
+ invert);
}
if (r.BothInputsAre(Type::Receiver())) {
return r.ChangeToPureOperator(
simplified()->ReferenceEqual(Type::Receiver()), invert);
}
- // TODO(turbofan): js-typed-lowering of Equal(undefined)
- // TODO(turbofan): js-typed-lowering of Equal(null)
- // TODO(turbofan): js-typed-lowering of Equal(boolean)
+ if (r.OneInputIs(Type::NullOrUndefined())) {
+ Callable const callable = CodeFactory::CompareNilIC(isolate(), kNullValue);
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ node->RemoveInput(r.LeftInputIs(Type::NullOrUndefined()) ? 0 : 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ if (invert) {
+      // Insert a boolean not to invert the value.
+ Node* value = graph()->NewNode(simplified()->BooleanNot(), node);
+ node->ReplaceUses(value);
+ // Note: ReplaceUses() smashes all uses, so smash it back here.
+ value->ReplaceInput(0, node);
+ return Replace(value);
+ }
+ return Changed(node);
+ }
return NoChange();
}
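+// Summary sketch of the abstract-equality cases handled above:
+//
+//   a == b  with a,b:number   => NumberEqual(a, b)
+//   a == b  with a,b:string   => StringEqual(a, b)
+//   a == b  with a,b:boolean  => ReferenceEqual(a, b)
+//   a == null (or undefined)  => Call[CompareNilIC](a)
+//
+// The inverting operators additionally wrap the result in a BooleanNot.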
Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.left() == r.right()) {
    // x === x is always true unless x is NaN
if (!r.left_type()->Maybe(Type::NaN())) {
- return ReplaceEagerly(node, jsgraph()->BooleanConstant(!invert));
+ Node* replacement = jsgraph()->BooleanConstant(!invert);
+ ReplaceWithValue(node, replacement);
+ return Replace(replacement);
}
}
if (r.OneInputCannotBe(Type::NumberOrString())) {
    // For values with canonical representation (i.e. neither string nor
    // number) an empty type intersection means the values cannot be
    // strictly equal.
if (!r.left_type()->Maybe(r.right_type())) {
- return ReplaceEagerly(node, jsgraph()->BooleanConstant(invert));
+ Node* replacement = jsgraph()->BooleanConstant(invert);
+ ReplaceWithValue(node, replacement);
+ return Replace(replacement);
}
}
+ if (r.OneInputIs(the_hole_type_)) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(the_hole_type_),
+ invert);
+ }
if (r.OneInputIs(Type::Undefined())) {
return r.ChangeToPureOperator(
simplified()->ReferenceEqual(Type::Undefined()), invert);
@@ -479,8 +710,13 @@
return r.ChangeToPureOperator(
simplified()->ReferenceEqual(Type::Receiver()), invert);
}
+ if (r.BothInputsAre(Type::Unique())) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Unique()),
+ invert);
+ }
if (r.BothInputsAre(Type::String())) {
- return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+ return r.ChangeToStringComparisonOperator(simplified()->StringEqual(),
+ invert);
}
if (r.BothInputsAre(Type::Number())) {
return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
@@ -490,34 +726,35 @@
}
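+// A sketch of the constant-folding cases above: when the type of x cannot
+// include NaN, x === x folds to true (and x !== x to false); when both
+// inputs have canonical representations and their types are disjoint, the
+// comparison folds to false (or true when inverted).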
-Reduction JSTypedLowering::ReduceJSUnaryNot(Node* node) {
- Node* input = node->InputAt(0);
- Type* input_type = NodeProperties::GetBounds(input).upper;
+Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
+ Node* const input = node->InputAt(0);
+ Type* const input_type = NodeProperties::GetType(input);
+ Node* const effect = NodeProperties::GetEffectInput(node);
if (input_type->Is(Type::Boolean())) {
- // JSUnaryNot(x:boolean,context) => BooleanNot(x)
- node->set_op(simplified()->BooleanNot());
+ // JSToBoolean(x:boolean) => x
+ ReplaceWithValue(node, input, effect);
+ return Replace(input);
+ } else if (input_type->Is(Type::OrderedNumber())) {
+ // JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, graph()->NewNode(simplified()->NumberEqual(), input,
+ jsgraph()->ZeroConstant()));
node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
+ } else if (input_type->Is(Type::String())) {
+ // JSToBoolean(x:string) => NumberLessThan(#0,x.length)
+ FieldAccess const access = AccessBuilder::ForStringLength();
+ Node* length = graph()->NewNode(simplified()->LoadField(access), input,
+ effect, graph()->start());
+ ReplaceWithValue(node, node, length);
+ node->ReplaceInput(0, jsgraph()->ZeroConstant());
+ node->ReplaceInput(1, length);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, simplified()->NumberLessThan());
return Changed(node);
}
- // JSUnaryNot(x,context) => BooleanNot(AnyToBoolean(x))
- node->set_op(simplified()->BooleanNot());
- node->ReplaceInput(0, graph()->NewNode(simplified()->AnyToBoolean(), input));
- node->TrimInputCount(1);
- return Changed(node);
-}
-
-
-Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
- Node* input = node->InputAt(0);
- Type* input_type = NodeProperties::GetBounds(input).upper;
- if (input_type->Is(Type::Boolean())) {
- // JSToBoolean(x:boolean,context) => x
- return Replace(input);
- }
- // JSToBoolean(x,context) => AnyToBoolean(x)
- node->set_op(simplified()->AnyToBoolean());
- node->TrimInputCount(1);
- return Changed(node);
+ return NoChange();
}
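+// Worked example for the string case above: with s:string,
+//
+//   JSToBoolean(s) => NumberLessThan(#0, s.length)
+//
+// i.e. a string is truthy iff its length is nonzero, the length being read
+// via a LoadField of the string length.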
@@ -528,10 +765,23 @@
if (result.Changed()) return result;
return Changed(input); // JSToNumber(JSToNumber(x)) => JSToNumber(x)
}
+  // Check for a Select that maps the hole NaN (a signaling NaN) to
+  // undefined; a ToNumber of that pattern reduces to the number input.
+ if (input->opcode() == IrOpcode::kSelect) {
+ Node* check = NodeProperties::GetValueInput(input, 0);
+ Node* vtrue = NodeProperties::GetValueInput(input, 1);
+ Type* vtrue_type = NodeProperties::GetType(vtrue);
+ Node* vfalse = NodeProperties::GetValueInput(input, 2);
+ Type* vfalse_type = NodeProperties::GetType(vfalse);
+ if (vtrue_type->Is(Type::Undefined()) && vfalse_type->Is(Type::Number())) {
+ if (check->opcode() == IrOpcode::kNumberIsHoleNaN &&
+ check->InputAt(0) == vfalse) {
+ // JSToNumber(Select(NumberIsHoleNaN(x), y:undefined, x:number)) => x
+ return Replace(vfalse);
+ }
+ }
+ }
// Check if we have a cached conversion.
- Node* conversion = FindConversion<IrOpcode::kJSToNumber>(input);
- if (conversion) return Replace(conversion);
- Type* input_type = NodeProperties::GetBounds(input).upper;
+ Type* input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::Number())) {
// JSToNumber(x:number) => x
return Changed(input);
@@ -558,81 +808,24 @@
Node* const input = node->InputAt(0);
Reduction reduction = ReduceJSToNumberInput(input);
if (reduction.Changed()) {
- NodeProperties::ReplaceWithValue(node, reduction.replacement());
+ ReplaceWithValue(node, reduction.replacement());
return reduction;
}
- Type* const input_type = NodeProperties::GetBounds(input).upper;
+ Type* const input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::PlainPrimitive())) {
- if (input->opcode() == IrOpcode::kPhi) {
- // JSToNumber(phi(x1,...,xn,control):plain-primitive,context)
- // => phi(JSToNumber(x1,no-context),
- // ...,
- // JSToNumber(xn,no-context),control)
- int const input_count = input->InputCount() - 1;
- Node* const control = input->InputAt(input_count);
- DCHECK_LE(0, input_count);
- DCHECK(NodeProperties::IsControl(control));
- DCHECK(NodeProperties::GetBounds(node).upper->Is(Type::Number()));
- DCHECK(!NodeProperties::GetBounds(input).upper->Is(Type::Number()));
- RelaxEffects(node);
- node->set_op(common()->Phi(kMachAnyTagged, input_count));
- for (int i = 0; i < input_count; ++i) {
- // We must be very careful not to introduce cycles when pushing
- // operations into phis. It is safe for {value}, since it appears
- // as input to the phi that we are replacing, but it's not safe
- // to simply reuse the context of the {node}. However, ToNumber()
- // does not require a context anyways, so it's safe to discard it
- // here and pass the dummy context.
- Node* const value = ConvertToNumber(input->InputAt(i));
- if (i < node->InputCount()) {
- node->ReplaceInput(i, value);
- } else {
- node->AppendInput(graph()->zone(), value);
- }
- }
- if (input_count < node->InputCount()) {
- node->ReplaceInput(input_count, control);
- } else {
- node->AppendInput(graph()->zone(), control);
- }
- node->TrimInputCount(input_count + 1);
- return Changed(node);
- }
- if (input->opcode() == IrOpcode::kSelect) {
- // JSToNumber(select(c,x1,x2):plain-primitive,context)
- // => select(c,JSToNumber(x1,no-context),JSToNumber(x2,no-context))
- int const input_count = input->InputCount();
- BranchHint const input_hint = SelectParametersOf(input->op()).hint();
- DCHECK_EQ(3, input_count);
- DCHECK(NodeProperties::GetBounds(node).upper->Is(Type::Number()));
- DCHECK(!NodeProperties::GetBounds(input).upper->Is(Type::Number()));
- RelaxEffects(node);
- node->set_op(common()->Select(kMachAnyTagged, input_hint));
- node->ReplaceInput(0, input->InputAt(0));
- for (int i = 1; i < input_count; ++i) {
- // We must be very careful not to introduce cycles when pushing
- // operations into selects. It is safe for {value}, since it appears
- // as input to the select that we are replacing, but it's not safe
- // to simply reuse the context of the {node}. However, ToNumber()
- // does not require a context anyways, so it's safe to discard it
- // here and pass the dummy context.
- Node* const value = ConvertToNumber(input->InputAt(i));
- node->ReplaceInput(i, value);
- }
- node->TrimInputCount(input_count);
- return Changed(node);
- }
- // Remember this conversion.
- InsertConversion(node);
- if (node->InputAt(1) != jsgraph()->NoContextConstant() ||
- node->InputAt(2) != graph()->start() ||
- node->InputAt(3) != graph()->start()) {
+ if (NodeProperties::GetContextInput(node) !=
+ jsgraph()->NoContextConstant() ||
+ NodeProperties::GetEffectInput(node) != graph()->start() ||
+ NodeProperties::GetControlInput(node) != graph()->start()) {
// JSToNumber(x:plain-primitive,context,effect,control)
// => JSToNumber(x,no-context,start,start)
- RelaxEffects(node);
- node->ReplaceInput(1, jsgraph()->NoContextConstant());
- node->ReplaceInput(2, graph()->start());
- node->ReplaceInput(3, graph()->start());
+ RelaxEffectsAndControls(node);
+ NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
+ NodeProperties::ReplaceControlInput(node, graph()->start());
+ NodeProperties::ReplaceEffectInput(node, graph()->start());
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ NodeProperties::ReplaceFrameStateInput(node, 0,
+ jsgraph()->EmptyFrameState());
return Changed(node);
}
}
@@ -647,17 +840,22 @@
if (result.Changed()) return result;
return Changed(input); // JSToString(JSToString(x)) => JSToString(x)
}
- Type* input_type = NodeProperties::GetBounds(input).upper;
+ Type* input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::String())) {
return Changed(input); // JSToString(x:string) => x
}
+ if (input_type->Is(Type::Boolean())) {
+ return Replace(graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged), input,
+ jsgraph()->HeapConstant(factory()->true_string()),
+ jsgraph()->HeapConstant(factory()->false_string())));
+ }
if (input_type->Is(Type::Undefined())) {
return Replace(jsgraph()->HeapConstant(factory()->undefined_string()));
}
if (input_type->Is(Type::Null())) {
return Replace(jsgraph()->HeapConstant(factory()->null_string()));
}
- // TODO(turbofan): js-typed-lowering of ToString(x:boolean)
// TODO(turbofan): js-typed-lowering of ToString(x:number)
return NoChange();
}
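+// A sketch of the boolean case added above:
+//
+//   JSToString(x:boolean) => Select(x, "true", "false")
+//
+// materialized as a tagged Select over the two canonical string constants.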
@@ -668,50 +866,173 @@
Node* const input = node->InputAt(0);
Reduction reduction = ReduceJSToStringInput(input);
if (reduction.Changed()) {
- NodeProperties::ReplaceWithValue(node, reduction.replacement());
+ ReplaceWithValue(node, reduction.replacement());
return reduction;
}
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSToObject, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (!receiver_type->Is(Type::Receiver())) {
+ // TODO(bmeurer/mstarzinger): Add support for lowering inside try blocks.
+ if (receiver_type->Maybe(Type::NullOrUndefined()) &&
+ NodeProperties::IsExceptionalCall(node)) {
+ // ToObject throws for null or undefined inputs.
+ return NoChange();
+ }
+
+ // Check whether {receiver} is a Smi.
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+
+ // Determine the instance type of {receiver}.
+ Node* receiver_map = efalse0 =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, efalse0, if_false0);
+ Node* receiver_instance_type = efalse0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ receiver_map, efalse0, if_false0);
+
+ // Check whether {receiver} is a spec object.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Node* check1 =
+ graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
+ receiver_instance_type);
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check1, if_false0);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+
+ // Convert {receiver} using the ToObjectStub.
+ Node* if_convert =
+ graph()->NewNode(common()->Merge(2), if_true0, if_false1);
+ Node* econvert =
+ graph()->NewNode(common()->EffectPhi(2), etrue0, efalse1, if_convert);
+ Node* rconvert;
+ {
+ Callable callable = CodeFactory::ToObject(isolate());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ rconvert = econvert = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+ receiver, context, frame_state, econvert, if_convert);
+ }
+
+ // The {receiver} is already a spec object.
+ Node* if_done = if_true1;
+ Node* edone = etrue1;
+ Node* rdone = receiver;
+
+ control = graph()->NewNode(common()->Merge(2), if_convert, if_done);
+ effect = graph()->NewNode(common()->EffectPhi(2), econvert, edone, control);
+ receiver =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ rconvert, rdone, control);
+ }
+ ReplaceWithValue(node, receiver, effect, control);
+ return Changed(receiver);
+}
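+// A sketch of the control flow built above: an ObjectIsSmi check and an
+// instance-type check route Smis and other primitives to a call of the
+// ToObjectStub, while values that are already JSReceivers pass through
+// unchanged; both paths merge in a Phi yielding the resulting receiver.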
+
+
+Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Handle<Name> name = NamedAccessOf(node->op()).name();
+ // Optimize "length" property of strings.
+ if (name.is_identical_to(factory()->length_string()) &&
+ receiver_type->Is(Type::String())) {
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
+ effect, control);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+ // Optimize "prototype" property of functions.
+ if (name.is_identical_to(factory()->prototype_string()) &&
+ receiver_type->IsConstant() &&
+ receiver_type->AsConstant()->Value()->IsJSFunction()) {
+ // TODO(turbofan): This lowering might not kick in if we ever lower
+ // the C++ accessor for "prototype" in an earlier optimization pass.
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(receiver_type->AsConstant()->Value());
+ if (function->has_initial_map()) {
+ // We need to add a code dependency on the initial map of the {function}
+ // in order to be notified about changes to the "prototype" of {function},
+ // so it doesn't make sense to continue unless deoptimization is enabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+ Node* value =
+ jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
+ return NoChange();
+}
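+// Examples of the fast paths above (a sketch): s.length for s:string reduces
+// to LoadField[String::length](s), and f.prototype for a constant function f
+// with a stable initial map folds to a constant, guarded by a code dependency
+// on that initial map.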
+
+
Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
Node* key = NodeProperties::GetValueInput(node, 1);
Node* base = NodeProperties::GetValueInput(node, 0);
- Type* key_type = NodeProperties::GetBounds(key).upper;
- // TODO(mstarzinger): This lowering is not correct if:
- // a) The typed array or it's buffer is neutered.
- HeapObjectMatcher<Object> mbase(base);
- if (mbase.HasValue() && mbase.Value().handle()->IsJSTypedArray()) {
+ Type* key_type = NodeProperties::GetType(key);
+ HeapObjectMatcher mbase(base);
+ if (mbase.HasValue() && mbase.Value()->IsJSTypedArray()) {
Handle<JSTypedArray> const array =
- Handle<JSTypedArray>::cast(mbase.Value().handle());
- array->GetBuffer()->set_is_neuterable(false);
- BufferAccess const access(array->type());
- size_t const k = ElementSizeLog2Of(access.machine_type());
- double const byte_length = array->byte_length()->Number();
- CHECK_LT(k, arraysize(shifted_int32_ranges_));
- if (IsExternalArrayElementsKind(array->map()->elements_kind()) &&
- key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
- // JSLoadProperty(typed-array, int32)
- Handle<ExternalArray> elements =
- Handle<ExternalArray>::cast(handle(array->elements()));
- Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
- Node* length = jsgraph()->Constant(byte_length);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- // Check if we can avoid the bounds check.
- if (key_type->Min() >= 0 && key_type->Max() < array->length()->Number()) {
- Node* load = graph()->NewNode(
- simplified()->LoadElement(
- AccessBuilder::ForTypedArrayElement(array->type(), true)),
- buffer, key, effect, control);
- return ReplaceEagerly(node, load);
+ Handle<JSTypedArray>::cast(mbase.Value());
+ if (!array->GetBuffer()->was_neutered()) {
+ array->GetBuffer()->set_is_neuterable(false);
+ BufferAccess const access(array->type());
+ size_t const k =
+ ElementSizeLog2Of(access.machine_type().representation());
+ double const byte_length = array->byte_length()->Number();
+ CHECK_LT(k, arraysize(shifted_int32_ranges_));
+ if (key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
+ // JSLoadProperty(typed-array, int32)
+ Handle<FixedTypedArrayBase> elements =
+ Handle<FixedTypedArrayBase>::cast(handle(array->elements()));
+ Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
+ Node* length = jsgraph()->Constant(byte_length);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ // Check if we can avoid the bounds check.
+ if (key_type->Min() >= 0 && key_type->Max() < array->length_value()) {
+ Node* load = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForTypedArrayElement(array->type(), true)),
+ buffer, key, effect, control);
+ ReplaceWithValue(node, load, load);
+ return Replace(load);
+ }
+ // Compute byte offset.
+ Node* offset = Word32Shl(key, static_cast<int>(k));
+ Node* load = graph()->NewNode(simplified()->LoadBuffer(access), buffer,
+ offset, length, effect, control);
+ ReplaceWithValue(node, load, load);
+ return Replace(load);
}
- // Compute byte offset.
- Node* offset = Word32Shl(key, static_cast<int>(k));
- Node* load = graph()->NewNode(simplified()->LoadBuffer(access), buffer,
- offset, length, effect, control);
- return ReplaceEagerly(node, load);
}
}
return NoChange();
@@ -722,95 +1043,264 @@
Node* key = NodeProperties::GetValueInput(node, 1);
Node* base = NodeProperties::GetValueInput(node, 0);
Node* value = NodeProperties::GetValueInput(node, 2);
- Type* key_type = NodeProperties::GetBounds(key).upper;
- Type* value_type = NodeProperties::GetBounds(value).upper;
- // TODO(mstarzinger): This lowering is not correct if:
- // a) The typed array or its buffer is neutered.
- HeapObjectMatcher<Object> mbase(base);
- if (mbase.HasValue() && mbase.Value().handle()->IsJSTypedArray()) {
+ Type* key_type = NodeProperties::GetType(key);
+ Type* value_type = NodeProperties::GetType(value);
+ HeapObjectMatcher mbase(base);
+ if (mbase.HasValue() && mbase.Value()->IsJSTypedArray()) {
Handle<JSTypedArray> const array =
- Handle<JSTypedArray>::cast(mbase.Value().handle());
- array->GetBuffer()->set_is_neuterable(false);
- BufferAccess const access(array->type());
- size_t const k = ElementSizeLog2Of(access.machine_type());
- double const byte_length = array->byte_length()->Number();
- CHECK_LT(k, arraysize(shifted_int32_ranges_));
- if (IsExternalArrayElementsKind(array->map()->elements_kind()) &&
- access.external_array_type() != kExternalUint8ClampedArray &&
- key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
- // JSLoadProperty(typed-array, int32)
- Handle<ExternalArray> elements =
- Handle<ExternalArray>::cast(handle(array->elements()));
- Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
- Node* length = jsgraph()->Constant(byte_length);
- Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- // Convert to a number first.
- if (!value_type->Is(Type::Number())) {
- Reduction number_reduction = ReduceJSToNumberInput(value);
- if (number_reduction.Changed()) {
- value = number_reduction.replacement();
- } else {
- value = effect = graph()->NewNode(javascript()->ToNumber(), value,
- context, effect, control);
+ Handle<JSTypedArray>::cast(mbase.Value());
+ if (!array->GetBuffer()->was_neutered()) {
+ array->GetBuffer()->set_is_neuterable(false);
+ BufferAccess const access(array->type());
+ size_t const k =
+ ElementSizeLog2Of(access.machine_type().representation());
+ double const byte_length = array->byte_length()->Number();
+ CHECK_LT(k, arraysize(shifted_int32_ranges_));
+ if (access.external_array_type() != kExternalUint8ClampedArray &&
+ key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
+        // JSStoreProperty(typed-array, int32, value)
+ Handle<FixedTypedArrayBase> elements =
+ Handle<FixedTypedArrayBase>::cast(handle(array->elements()));
+ Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
+ Node* length = jsgraph()->Constant(byte_length);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ // Convert to a number first.
+ if (!value_type->Is(Type::Number())) {
+ Reduction number_reduction = ReduceJSToNumberInput(value);
+ if (number_reduction.Changed()) {
+ value = number_reduction.replacement();
+ } else {
+ Node* frame_state_for_to_number =
+ NodeProperties::GetFrameStateInput(node, 1);
+ value = effect =
+ graph()->NewNode(javascript()->ToNumber(), value, context,
+ frame_state_for_to_number, effect, control);
+ }
}
- }
- // For integer-typed arrays, convert to the integer type.
- if (TypeOf(access.machine_type()) == kTypeInt32 &&
- !value_type->Is(Type::Signed32())) {
- value = graph()->NewNode(simplified()->NumberToInt32(), value);
- } else if (TypeOf(access.machine_type()) == kTypeUint32 &&
- !value_type->Is(Type::Unsigned32())) {
- value = graph()->NewNode(simplified()->NumberToUint32(), value);
- }
- // Check if we can avoid the bounds check.
- if (key_type->Min() >= 0 && key_type->Max() < array->length()->Number()) {
- node->set_op(simplified()->StoreElement(
- AccessBuilder::ForTypedArrayElement(array->type(), true)));
+ // Check if we can avoid the bounds check.
+ if (key_type->Min() >= 0 && key_type->Max() < array->length_value()) {
+ RelaxControls(node);
+ node->ReplaceInput(0, buffer);
+ DCHECK_EQ(key, node->InputAt(1));
+ node->ReplaceInput(2, value);
+ node->ReplaceInput(3, effect);
+ node->ReplaceInput(4, control);
+ node->TrimInputCount(5);
+ NodeProperties::ChangeOp(
+ node,
+ simplified()->StoreElement(
+ AccessBuilder::ForTypedArrayElement(array->type(), true)));
+ return Changed(node);
+ }
+ // Compute byte offset.
+ Node* offset = Word32Shl(key, static_cast<int>(k));
+ // Turn into a StoreBuffer operation.
+ RelaxControls(node);
node->ReplaceInput(0, buffer);
- DCHECK_EQ(key, node->InputAt(1));
- node->ReplaceInput(2, value);
- node->ReplaceInput(3, effect);
- node->ReplaceInput(4, control);
- node->TrimInputCount(5);
+ node->ReplaceInput(1, offset);
+ node->ReplaceInput(2, length);
+ node->ReplaceInput(3, value);
+ node->ReplaceInput(4, effect);
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
+ NodeProperties::ChangeOp(node, simplified()->StoreBuffer(access));
return Changed(node);
}
- // Compute byte offset.
- Node* offset = Word32Shl(key, static_cast<int>(k));
- // Turn into a StoreBuffer operation.
- node->set_op(simplified()->StoreBuffer(access));
- node->ReplaceInput(0, buffer);
- node->ReplaceInput(1, offset);
- node->ReplaceInput(2, length);
- node->ReplaceInput(3, value);
- node->ReplaceInput(4, effect);
- DCHECK_EQ(control, node->InputAt(5));
- DCHECK_EQ(6, node->InputCount());
- return Changed(node);
}
}
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+
+ // If deoptimization is disabled, we cannot optimize.
+ if (!(flags() & kDeoptimizationEnabled) ||
+ (flags() & kDisableBinaryOpReduction)) {
+ return NoChange();
+ }
+
+ // If we are in a try block, don't optimize since the runtime call
+ // in the proxy case can throw.
+ if (NodeProperties::IsExceptionalCall(node)) return NoChange();
+
+ JSBinopReduction r(this, node);
+ Node* effect = r.effect();
+ Node* control = r.control();
+
+ if (!r.right_type()->IsConstant() ||
+ !r.right_type()->AsConstant()->Value()->IsJSFunction()) {
+ return NoChange();
+ }
+
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(r.right_type()->AsConstant()->Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ if (!function->IsConstructor() ||
+ function->map()->has_non_instance_prototype()) {
+ return NoChange();
+ }
+
+ JSFunction::EnsureHasInitialMap(function);
+ DCHECK(function->has_initial_map());
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ this->dependencies()->AssumeInitialMapCantChange(initial_map);
+ Node* prototype =
+ jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
+
+ Node* if_is_smi = nullptr;
+ Node* e_is_smi = nullptr;
+  // A Smi check is only needed if the left hand side might be a Smi.
+ if (r.left_type()->Maybe(Type::TaggedSigned())) {
+ Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
+ Node* branch_is_smi =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), is_smi, control);
+ if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
+ e_is_smi = effect;
+ control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
+ }
+
+ Node* object_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ r.left(), effect, control);
+
+  // Loop through the {object}'s prototype chain looking for the {prototype}.
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+
+ Node* loop_effect = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+
+ Node* loop_object_map =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ object_map, r.left(), loop);
+
+ // Check if the lhs needs access checks.
+ Node* map_bit_field = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMapBitField()),
+ loop_object_map, loop_effect, control);
+ int is_access_check_needed_bit = 1 << Map::kIsAccessCheckNeeded;
+ Node* is_access_check_needed_num =
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), map_bit_field,
+ jsgraph()->Uint32Constant(is_access_check_needed_bit));
+ Node* is_access_check_needed =
+ graph()->NewNode(machine()->Word32Equal(), is_access_check_needed_num,
+ jsgraph()->Uint32Constant(is_access_check_needed_bit));
+
+ Node* branch_is_access_check_needed = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), is_access_check_needed, control);
+ Node* if_is_access_check_needed =
+ graph()->NewNode(common()->IfTrue(), branch_is_access_check_needed);
+ Node* e_is_access_check_needed = effect;
+
+ control =
+ graph()->NewNode(common()->IfFalse(), branch_is_access_check_needed);
+
+ // Check if the lhs is a proxy.
+ Node* map_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ loop_object_map, loop_effect, control);
+ Node* is_proxy = graph()->NewNode(machine()->Word32Equal(), map_instance_type,
+ jsgraph()->Uint32Constant(JS_PROXY_TYPE));
+ Node* branch_is_proxy =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), is_proxy, control);
+ Node* if_is_proxy = graph()->NewNode(common()->IfTrue(), branch_is_proxy);
+ Node* e_is_proxy = effect;
+
+ Node* runtime_has_in_proto_chain = control = graph()->NewNode(
+ common()->Merge(2), if_is_access_check_needed, if_is_proxy);
+ effect = graph()->NewNode(common()->EffectPhi(2), e_is_access_check_needed,
+ e_is_proxy, control);
+
+ // If we need an access check or the object is a Proxy, make a runtime call
+ // to finish the lowering.
+ Node* bool_result_runtime_has_in_proto_chain_case = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kHasInPrototypeChain, 2), r.left(),
+ prototype, context, frame_state, effect, control);
+
+ control = graph()->NewNode(common()->IfFalse(), branch_is_proxy);
+
+ Node* object_prototype = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapPrototype()),
+ loop_object_map, loop_effect, control);
+
+ // Check if object prototype is equal to function prototype.
+ Node* eq_proto =
+ graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
+ object_prototype, prototype);
+ Node* branch_eq_proto =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), eq_proto, control);
+ Node* if_eq_proto = graph()->NewNode(common()->IfTrue(), branch_eq_proto);
+ Node* e_eq_proto = effect;
+
+ control = graph()->NewNode(common()->IfFalse(), branch_eq_proto);
+
+ // If not, check if object prototype is the null prototype.
+ Node* null_proto =
+ graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
+ object_prototype, jsgraph()->NullConstant());
+ Node* branch_null_proto = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), null_proto, control);
+ Node* if_null_proto = graph()->NewNode(common()->IfTrue(), branch_null_proto);
+ Node* e_null_proto = effect;
+
+ control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
+ Node* load_object_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ object_prototype, effect, control);
+ // Close the loop.
+ loop_effect->ReplaceInput(1, effect);
+ loop_object_map->ReplaceInput(1, load_object_map);
+ loop->ReplaceInput(1, control);
+
+ control = graph()->NewNode(common()->Merge(3), runtime_has_in_proto_chain,
+ if_eq_proto, if_null_proto);
+ effect = graph()->NewNode(common()->EffectPhi(3),
+ bool_result_runtime_has_in_proto_chain_case,
+ e_eq_proto, e_null_proto, control);
+
+ Node* result = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 3),
+ bool_result_runtime_has_in_proto_chain_case, jsgraph()->TrueConstant(),
+ jsgraph()->FalseConstant(), control);
+
+ if (if_is_smi != nullptr) {
+ DCHECK_NOT_NULL(e_is_smi);
+ control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
+ result = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->FalseConstant(), result, control);
+ }
+
+ ReplaceWithValue(node, result, effect, control);
+ return Changed(result);
+}
+
+
Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = graph()->start();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = graph()->start();
for (size_t i = 0; i < access.depth(); ++i) {
- node->ReplaceInput(
- 0, graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
- NodeProperties::GetValueInput(node, 0), effect, control));
+ Node* previous = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
+ NodeProperties::GetValueInput(node, 0), effect, control);
+ node->ReplaceInput(0, previous);
}
- node->set_op(
- simplified()->LoadField(AccessBuilder::ForContextSlot(access.index())));
node->ReplaceInput(1, effect);
node->ReplaceInput(2, control);
- DCHECK_EQ(3, node->InputCount());
+ NodeProperties::ChangeOp(
+ node,
+ simplified()->LoadField(AccessBuilder::ForContextSlot(access.index())));
return Changed(node);
}
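+// A sketch of the shape produced above: a JSLoadContext at depth d chains d
+// loads of the Context::PREVIOUS_INDEX slot and then loads the slot itself,
+// e.g. for depth 1:
+//
+//   JSLoadContext[1, i](ctx) => LoadField[slot i](LoadField[PREVIOUS](ctx))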
@@ -818,53 +1308,1276 @@
Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = graph()->start();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = graph()->start();
for (size_t i = 0; i < access.depth(); ++i) {
- node->ReplaceInput(
- 0, graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
- NodeProperties::GetValueInput(node, 0), effect, control));
+ Node* previous = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
+ NodeProperties::GetValueInput(node, 0), effect, control);
+ node->ReplaceInput(0, previous);
}
- node->set_op(
- simplified()->StoreField(AccessBuilder::ForContextSlot(access.index())));
node->RemoveInput(2);
- DCHECK_EQ(4, node->InputCount());
+ node->ReplaceInput(2, effect);
+ NodeProperties::ChangeOp(
+ node,
+ simplified()->StoreField(AccessBuilder::ForContextSlot(access.index())));
return Changed(node);
}
+Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSConvertReceiver, node->opcode());
+ ConvertReceiverMode mode = ConvertReceiverModeOf(node->op());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* context = NodeProperties::GetContextInput(node);
+ Type* context_type = NodeProperties::GetType(context);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (!receiver_type->Is(Type::Receiver())) {
+ if (receiver_type->Is(Type::NullOrUndefined()) ||
+ mode == ConvertReceiverMode::kNullOrUndefined) {
+ if (context_type->IsConstant()) {
+ Handle<JSObject> global_proxy(
+ Handle<Context>::cast(context_type->AsConstant()->Value())
+ ->global_proxy(),
+ isolate());
+ receiver = jsgraph()->Constant(global_proxy);
+ } else {
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ receiver = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
+ native_context, native_context, effect);
+ }
+ } else if (!receiver_type->Maybe(Type::NullOrUndefined()) ||
+ mode == ConvertReceiverMode::kNotNullOrUndefined) {
+ receiver = effect =
+ graph()->NewNode(javascript()->ToObject(), receiver, context,
+ frame_state, effect, control);
+ } else {
+ // Check {receiver} for undefined.
+ Node* check0 =
+ graph()->NewNode(simplified()->ReferenceEqual(receiver_type),
+ receiver, jsgraph()->UndefinedConstant());
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check0, control);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+
+ // Check {receiver} for null.
+ Node* check1 =
+ graph()->NewNode(simplified()->ReferenceEqual(receiver_type),
+ receiver, jsgraph()->NullConstant());
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+
+ // Convert {receiver} using ToObject.
+ Node* if_convert = if_false1;
+ Node* econvert = effect;
+ Node* rconvert;
+ {
+ rconvert = econvert =
+ graph()->NewNode(javascript()->ToObject(), receiver, context,
+ frame_state, econvert, if_convert);
+ }
+
+ // Replace {receiver} with global proxy of {context}.
+ Node* if_global =
+ graph()->NewNode(common()->Merge(2), if_true0, if_true1);
+ Node* eglobal = effect;
+ Node* rglobal;
+ {
+ if (context_type->IsConstant()) {
+ Handle<JSObject> global_proxy(
+ Handle<Context>::cast(context_type->AsConstant()->Value())
+ ->global_proxy(),
+ isolate());
+ rglobal = jsgraph()->Constant(global_proxy);
+ } else {
+ Node* native_context = eglobal = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, eglobal);
+ rglobal = eglobal = graph()->NewNode(
+ javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
+ native_context, native_context, eglobal);
+ }
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_convert, if_global);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), econvert, eglobal, control);
+ receiver =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ rconvert, rglobal, control);
+ }
+ }
+ ReplaceWithValue(node, receiver, effect, control);
+ return Changed(receiver);
+}
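+// A sketch of the receiver conversion above: null or undefined receivers
+// become the global proxy of the (native) context, receivers known to be
+// JSReceivers pass through unchanged, and everything else goes through
+// ToObject; the mixed case builds a branch-and-merge diamond that selects
+// between the global proxy and the ToObject result.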
+
+
+namespace {
+
+// Maximum instance size for which allocations will be inlined.
+const int kMaxInlineInstanceSize = 64 * kPointerSize;
+
+
+// Checks whether allocation using the given constructor can be inlined.
+bool IsAllocationInlineable(Handle<JSFunction> constructor) {
+ // TODO(bmeurer): Further relax restrictions on inlining, i.e.
+ // instance type and maybe instance size (inobject properties
+  // are limited anyway by the runtime).
+ return constructor->has_initial_map() &&
+ constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
+ constructor->initial_map()->instance_size() < kMaxInlineInstanceSize;
+}
+
+} // namespace
+
+
+Reduction JSTypedLowering::ReduceJSCreate(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreate, node->opcode());
+ Node* const target = NodeProperties::GetValueInput(node, 0);
+ Type* const target_type = NodeProperties::GetType(target);
+ Node* const new_target = NodeProperties::GetValueInput(node, 1);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ // TODO(turbofan): Add support for NewTarget passed to JSCreate.
+ if (target != new_target) return NoChange();
+ // Extract constructor function.
+ if (target_type->IsConstant() &&
+ target_type->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> constructor =
+ Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ DCHECK(constructor->IsConstructor());
+ // Force completion of inobject slack tracking before
+ // generating code to finalize the instance size.
+ constructor->CompleteInobjectSlackTrackingIfActive();
+
+ // TODO(bmeurer): We fall back to the runtime in case we cannot inline
+ // the allocation here, which is sort of expensive. We should think about
+ // a soft fallback to some NewObjectCodeStub.
+ if (IsAllocationInlineable(constructor)) {
+ // Compute instance size from initial map of {constructor}.
+ Handle<Map> initial_map(constructor->initial_map(), isolate());
+ int const instance_size = initial_map->instance_size();
+
+ // Add a dependency on the {initial_map} to make sure that this code is
+ // deoptimized whenever the {initial_map} of the {constructor} changes.
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+
+ // Emit code to allocate the JSObject instance for the {constructor}.
+ AllocationBuilder a(jsgraph(), effect, graph()->start());
+ a.Allocate(instance_size);
+ a.Store(AccessBuilder::ForMap(), initial_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ jsgraph()->UndefinedConstant());
+ }
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
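+// A sketch of the inlined allocation above: for a known inlineable
+// constructor C, "new C()" becomes a raw Allocate of the initial map's
+// instance size, followed by Stores of the map, the empty fixed array for
+// properties and elements, and undefined for each in-object property.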
+
+
+namespace {
+
+// Retrieves the frame state holding actual argument values.
+Node* GetArgumentsFrameState(Node* frame_state) {
+ Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ FrameStateInfo outer_state_info = OpParameter<FrameStateInfo>(outer_state);
+ return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
+ ? outer_state
+ : frame_state;
+}
+
+} // namespace
+
+
+Reduction JSTypedLowering::ReduceJSCreateArguments(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
+ CreateArgumentsParameters const& p = CreateArgumentsParametersOf(node->op());
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+
+ // Use the ArgumentsAccessStub for materializing both mapped and unmapped
+  // arguments objects, but only for non-inlined (i.e. outermost) frames.
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
+ Isolate* isolate = jsgraph()->isolate();
+ int parameter_count = state_info.parameter_count() - 1;
+ int parameter_offset = parameter_count * kPointerSize;
+ int offset = StandardFrameConstants::kCallerSPOffset + parameter_offset;
+ Node* parameter_pointer = graph()->NewNode(
+ machine()->IntAdd(), graph()->NewNode(machine()->LoadFramePointer()),
+ jsgraph()->IntPtrConstant(offset));
+
+ if (p.type() != CreateArgumentsParameters::kRestArray) {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ bool unmapped = p.type() == CreateArgumentsParameters::kUnmappedArguments;
+ Callable callable = CodeFactory::ArgumentsAccess(
+ isolate, unmapped, shared->has_duplicate_parameters());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(parameter_count));
+ node->InsertInput(graph()->zone(), 3, parameter_pointer);
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ } else {
+ Callable callable = CodeFactory::RestArgumentsAccess(isolate);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->ReplaceInput(1, jsgraph()->Constant(parameter_count));
+ node->InsertInput(graph()->zone(), 2, parameter_pointer);
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(p.start_index()));
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ }
+ } else if (outer_state->opcode() == IrOpcode::kFrameState) {
+ // Use inline allocation for all mapped arguments objects within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ if (p.type() == CreateArgumentsParameters::kMappedArguments) {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ Node* const callee = NodeProperties::GetValueInput(node, 0);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // TODO(mstarzinger): Duplicate parameters are not handled yet.
+ if (shared->has_duplicate_parameters()) return NoChange();
+ // Choose the correct frame state and frame state info depending on
+ // whether there conceptually is an arguments adaptor frame in the call
+ // chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by arguments object.
+ bool has_aliased_arguments = false;
+ Node* const elements = AllocateAliasedArguments(
+ effect, control, args_state, context, shared, &has_aliased_arguments);
+ effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+ // Load the arguments object map from the current native context.
+ Node* const load_native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* const load_arguments_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
+ : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ int length = args_state_info.parameter_count() - 1; // Minus receiver.
+ STATIC_ASSERT(Heap::kSloppyArgumentsObjectSize == 5 * kPointerSize);
+ a.Allocate(Heap::kSloppyArgumentsObjectSize);
+ a.Store(AccessBuilder::ForMap(), load_arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+ a.Store(AccessBuilder::ForArgumentsCallee(), callee);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ } else if (p.type() == CreateArgumentsParameters::kUnmappedArguments) {
+ // Use inline allocation for all unmapped arguments objects within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Choose the correct frame state and frame state info depending on
+ // whether there conceptually is an arguments adaptor frame in the call
+ // chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by arguments object.
+ Node* const elements = AllocateArguments(effect, control, args_state);
+ effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+ // Load the arguments object map from the current native context.
+ Node* const load_native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* const load_arguments_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ Context::STRICT_ARGUMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ int length = args_state_info.parameter_count() - 1; // Minus receiver.
+ STATIC_ASSERT(Heap::kStrictArgumentsObjectSize == 4 * kPointerSize);
+ a.Allocate(Heap::kStrictArgumentsObjectSize);
+ a.Store(AccessBuilder::ForMap(), load_arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ } else if (p.type() == CreateArgumentsParameters::kRestArray) {
+      // Use inline allocation for rest arrays within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Choose the correct frame state and frame state info depending on
+ // whether there conceptually is an arguments adaptor frame in the call
+ // chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by the rest array.
+ Node* const elements =
+ AllocateRestArguments(effect, control, args_state, p.start_index());
+ effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+ // Load the JSArray object map from the current native context.
+ Node* const load_native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* const load_jsarray_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the jsarray.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+      // Subtract one for the receiver.
+ int argument_count = args_state_info.parameter_count() - 1;
+ int length = std::max(0, argument_count - p.start_index());
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ a.Allocate(JSArray::kSize);
+ a.Store(AccessBuilder::ForMap(), load_jsarray_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
+ jsgraph()->Constant(length));
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+ }
+
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceNewArray(Node* node, Node* length,
+ int capacity,
+ Handle<AllocationSite> site) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Extract transition and tenuring feedback from the {site} and add
+ // appropriate code dependencies on the {site} if deoptimization is
+ // enabled.
+ PretenureFlag pretenure = site->GetPretenureMode();
+ ElementsKind elements_kind = site->GetElementsKind();
+ DCHECK(IsFastElementsKind(elements_kind));
+ if (flags() & kDeoptimizationEnabled) {
+ dependencies()->AssumeTenuringDecision(site);
+ dependencies()->AssumeTransitionStable(site);
+ }
+
+ // Retrieve the initial map for the array from the appropriate native context.
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* js_array_map = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ArrayMapIndex(elements_kind), true),
+ native_context, native_context, effect);
+
+  // Set up elements and properties.
+ Node* elements;
+ if (capacity == 0) {
+ elements = jsgraph()->EmptyFixedArrayConstant();
+ } else {
+ elements = effect =
+ AllocateElements(effect, control, elements_kind, capacity, pretenure);
+ }
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+ // Perform the allocation of the actual JSArray object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSArray::kSize, pretenure);
+ a.Store(AccessBuilder::ForMap(), js_array_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
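+// A sketch of what ReduceNewArray emits: the array map for the given
+// elements kind loaded from the native context, an elements backing store
+// of the requested capacity (the canonical empty FixedArray when the
+// capacity is zero), and an inline JSArray allocation initialized with
+// map, properties, elements and length.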
+
+
+Reduction JSTypedLowering::ReduceJSCreateArray(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+ CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* new_target = NodeProperties::GetValueInput(node, 1);
+
+ // TODO(bmeurer): Optimize the subclassing case.
+ if (target != new_target) return NoChange();
+
+ // Check if we have a feedback {site} on the {node}.
+ Handle<AllocationSite> site = p.site();
+ if (p.site().is_null()) return NoChange();
+
+ // Attempt to inline calls to the Array constructor for the relevant cases
+ // where either no arguments are provided, or exactly one unsigned number
+ // argument is given.
+ if (site->CanInlineCall()) {
+ if (p.arity() == 0) {
+ Node* length = jsgraph()->ZeroConstant();
+ int capacity = JSArray::kPreallocatedArrayElements;
+ return ReduceNewArray(node, length, capacity, site);
+ } else if (p.arity() == 1) {
+ Node* length = NodeProperties::GetValueInput(node, 2);
+ Type* length_type = NodeProperties::GetType(length);
+ if (length_type->Is(type_cache_.kElementLoopUnrollType)) {
+ int capacity = static_cast<int>(length_type->Max());
+ return ReduceNewArray(node, length, capacity, site);
+ }
+ }
+ }
+
+ // Reduce {node} to the appropriate ArrayConstructorStub backend.
+ // Note that these stubs "behave" like JSFunctions, which means they
+ // expect a receiver on the stack, which they remove. We just push
+ // undefined for the receiver.
+ ElementsKind elements_kind = site->GetElementsKind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+ if (p.arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
+ override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
+ CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+ } else if (p.arity() == 1) {
+ // TODO(bmeurer): Optimize for the 0 length non-holey case?
+ ArraySingleArgumentConstructorStub stub(
+ isolate(), GetHoleyElementsKind(elements_kind), override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
+ CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+ } else {
+ int const arity = static_cast<int>(p.arity());
+ ArrayNArgumentsConstructorStub stub(isolate(), elements_kind,
+ override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
+ arity + 1, CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+ }
+}
+
+
+Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
+ CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
+ Handle<SharedFunctionInfo> shared = p.shared_info();
+
+  // Use the FastNewClosureStub, which allocates in new space, only for
+  // nested functions that don't need literal cloning.
+ if (p.pretenure() == NOT_TENURED && shared->num_literals() == 0) {
+ Isolate* isolate = jsgraph()->isolate();
+ Callable callable = CodeFactory::FastNewClosure(
+ isolate, shared->language_mode(), shared->kind());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 1, jsgraph()->HeapConstant(shared));
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSCreateIterResultObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* done = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Load the JSIteratorResult map for the {context}.
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* iterator_result_map = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
+ native_context, native_context, effect);
+
+ // Emit code to allocate the JSIteratorResult instance.
+ AllocationBuilder a(jsgraph(), effect, graph()->start());
+ a.Allocate(JSIteratorResult::kSize);
+ a.Store(AccessBuilder::ForMap(), iterator_result_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSIteratorResultValue(), value);
+ a.Store(AccessBuilder::ForJSIteratorResultDone(), done);
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
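+  // The size assert above guarantees that the five stores (map, properties,
+  // elements, value and done) initialize every field of the instance.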
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
+
+Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateLiteralArray, node->opcode());
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ Handle<FixedArray> const constants = Handle<FixedArray>::cast(p.constant());
+ int const length = constants->length();
+ int const flags = p.flags();
+
+ // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
+ // initial length limit for arrays with "fast" elements kind.
+ // TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
+ if ((flags & ArrayLiteral::kShallowElements) != 0 &&
+ (flags & ArrayLiteral::kIsStrong) == 0 &&
+ length < JSArray::kInitialMaxFastElementArray) {
+ Isolate* isolate = jsgraph()->isolate();
+ Callable callable = CodeFactory::FastCloneShallowArray(isolate);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ (OperatorProperties::GetFrameStateInputCount(node->op()) != 0)
+ ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* literal_index = jsgraph()->SmiConstant(p.index());
+ Node* constant_elements = jsgraph()->HeapConstant(constants);
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 2, literal_index);
+ node->InsertInput(graph()->zone(), 3, constant_elements);
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateLiteralObject, node->opcode());
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ Handle<FixedArray> const constants = Handle<FixedArray>::cast(p.constant());
+ // Constants are pairs, see ObjectLiteral::properties_count().
+ int const length = constants->length() / 2;
+ int const flags = p.flags();
+
+ // Use the FastCloneShallowObjectStub only for shallow boilerplates without
+ // elements up to the number of properties that the stubs can handle.
+ if ((flags & ObjectLiteral::kShallowProperties) != 0 &&
+ length <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ Isolate* isolate = jsgraph()->isolate();
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate, length);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ (OperatorProperties::GetFrameStateInputCount(node->op()) != 0)
+ ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* literal_index = jsgraph()->SmiConstant(p.index());
+ Node* literal_flags = jsgraph()->SmiConstant(flags);
+ Node* constant_elements = jsgraph()->HeapConstant(constants);
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 2, literal_index);
+ node->InsertInput(graph()->zone(), 3, constant_elements);
+ node->InsertInput(graph()->zone(), 4, literal_flags);
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSCreateFunctionContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
+ int slot_count = OpParameter<int>(node->op());
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+
+ // Use inline allocation for function contexts up to a size limit.
+ if (slot_count < kFunctionContextAllocationLimit) {
+    // JSCreateFunctionContext[slot_count < limit](fun)
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* extension = jsgraph()->TheHoleConstant();
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
+ a.AllocateArray(context_length, factory()->function_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
+ a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
+ }
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+
+  // Use the FastNewContextStub only for function contexts up to the
+  // maximum size supported by the stub.
+ if (slot_count <= FastNewContextStub::kMaximumSlots) {
+ Isolate* isolate = jsgraph()->isolate();
+ Callable callable = CodeFactory::FastNewContext(isolate, slot_count);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* closure = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), object);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
+
+Reduction JSTypedLowering::ReduceJSCreateCatchContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
+ Handle<String> name = OpParameter<Handle<String>>(node);
+ Node* exception = NodeProperties::GetValueInput(node, 0);
+ Node* closure = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ a.AllocateArray(Context::MIN_CONTEXT_SLOTS + 1,
+ factory()->catch_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), name);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
+ exception);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
+
+Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
+ Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+ int context_length = scope_info->ContextLength();
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+
+ // Use inline allocation for block contexts up to a size limit.
+ if (context_length < kBlockContextAllocationLimit) {
+ // JSCreateBlockContext[scope[length < limit]](fun)
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* extension = jsgraph()->Constant(scope_info);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ a.AllocateArray(context_length, factory()->block_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
+ a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
+ }
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
+ CallConstructParameters const& p = CallConstructParametersOf(node->op());
+ DCHECK_LE(2u, p.arity());
+ int const arity = static_cast<int>(p.arity() - 2);
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Type* target_type = NodeProperties::GetType(target);
+ Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+
+ // Check if {target} is a known JSFunction.
+ if (target_type->IsConstant() &&
+ target_type->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ // Remove the eager bailout frame state.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+
+ // Patch {node} to an indirect call via the {function}s construct stub.
+ Callable callable(handle(shared->construct_stub(), isolate()),
+ ConstructStubDescriptor(isolate()));
+ node->RemoveInput(arity + 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
+ CallDescriptor::kNeedsFrameState)));
+ return Changed(node);
+ }
+
+ // Check if {target} is a JSFunction.
+ if (target_type->Is(Type::Function())) {
+ // Remove the eager bailout frame state.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+
+ // Patch {node} to an indirect call via the ConstructFunction builtin.
+ Callable callable = CodeFactory::ConstructFunction(isolate());
+ node->RemoveInput(arity + 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
+ CallDescriptor::kNeedsFrameState)));
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ int const arity = static_cast<int>(p.arity() - 2);
+ ConvertReceiverMode convert_mode = p.convert_mode();
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Type* target_type = NodeProperties::GetType(target);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Try to infer receiver {convert_mode} from {receiver} type.
+ if (receiver_type->Is(Type::NullOrUndefined())) {
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
+ } else if (!receiver_type->Maybe(Type::NullOrUndefined())) {
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
+ }
+
+ // Check if {target} is a known JSFunction.
+ if (target_type->IsConstant() &&
+ target_type->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ // Class constructors are callable, but [[Call]] will raise an exception.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
+ if (IsClassConstructor(shared->kind())) return NoChange();
+
+ // Load the context from the {target}.
+ Node* context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
+ effect, control);
+ NodeProperties::ReplaceContextInput(node, context);
+
+ // Check if we need to convert the {receiver}.
+ if (is_sloppy(shared->language_mode()) && !shared->native() &&
+ !receiver_type->Is(Type::Receiver())) {
+ receiver = effect =
+ graph()->NewNode(javascript()->ConvertReceiver(convert_mode),
+ receiver, context, frame_state, effect, control);
+ NodeProperties::ReplaceValueInput(node, receiver, 1);
+ }
+
+ // Update the effect dependency for the {node}.
+ NodeProperties::ReplaceEffectInput(node, effect);
+
+ // Remove the eager bailout frame state.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+
+ // Compute flags for the call.
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ if (p.tail_call_mode() == TailCallMode::kAllow) {
+ flags |= CallDescriptor::kSupportsTailCalls;
+ }
+
+ Node* new_target = jsgraph()->UndefinedConstant();
+ Node* argument_count = jsgraph()->Int32Constant(arity);
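+    // A direct call suffices when the call-site arity matches the declared
+    // parameter count, or when the callee doesn't require arguments adaption.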
+ if (shared->internal_formal_parameter_count() == arity ||
+ shared->internal_formal_parameter_count() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ // Patch {node} to a direct call.
+ node->InsertInput(graph()->zone(), arity + 2, new_target);
+ node->InsertInput(graph()->zone(), arity + 3, argument_count);
+ NodeProperties::ChangeOp(node,
+ common()->Call(Linkage::GetJSCallDescriptor(
+ graph()->zone(), false, 1 + arity, flags)));
+ } else {
+ // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
+ Callable callable = CodeFactory::ArgumentAdaptor(isolate());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, argument_count);
+ node->InsertInput(
+ graph()->zone(), 4,
+ jsgraph()->Int32Constant(shared->internal_formal_parameter_count()));
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(),
+ 1 + arity, flags)));
+ }
+ return Changed(node);
+ }
+
+ // Check if {target} is a JSFunction.
+ if (target_type->Is(Type::Function())) {
+ // Remove the eager bailout frame state.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+
+ // Compute flags for the call.
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ if (p.tail_call_mode() == TailCallMode::kAllow) {
+ flags |= CallDescriptor::kSupportsTailCalls;
+ }
+
+ // Patch {node} to an indirect call via the CallFunction builtin.
+ Callable callable = CodeFactory::CallFunction(isolate(), convert_mode);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Int32Constant(arity));
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
+ flags)));
+ return Changed(node);
+ }
+
+ // Maybe we did at least learn something about the {receiver}.
+ if (p.convert_mode() != convert_mode) {
+ NodeProperties::ChangeOp(
+ node,
+ javascript()->CallFunction(p.arity(), p.language_mode(), p.feedback(),
+ convert_mode, p.tail_call_mode()));
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSForInDone(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSForInDone, node->opcode());
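+  // ForInDone(index, length) => Word32Equal(index, length)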
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Word32Equal());
+ return Changed(node);
+}
+
+
+Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSForInPrepare, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Get the set of properties to enumerate.
+ Node* cache_type = effect = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kGetPropertyNamesFast, 1), receiver,
+ context, frame_state, effect, control);
+ control = graph()->NewNode(common()->IfSuccess(), cache_type);
+
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* cache_type_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ cache_type, effect, control);
+ Node* meta_map = jsgraph()->HeapConstant(factory()->meta_map());
+
+ // If we got a map from the GetPropertyNamesFast runtime call, we can do a
+ // fast modification check. Otherwise, we got a fixed array, and we have to
+ // perform a slow check on every iteration.
+ Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
+ cache_type_map, meta_map);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* cache_array_true0;
+ Node* cache_length_true0;
+ Node* cache_type_true0;
+ Node* etrue0;
+ {
+ // Enum cache case.
+ Node* cache_type_enum_length = etrue0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField3()), cache_type,
+ effect, if_true0);
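+    // The enum length occupies the low bits of the map's bit field 3, so
+    // masking with EnumLengthBits::kMask extracts it without a shift.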
+ cache_length_true0 = graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), cache_type_enum_length,
+ jsgraph()->Int32Constant(Map::EnumLengthBits::kMask));
+
+ Node* check1 =
+ graph()->NewNode(machine()->Word32Equal(), cache_length_true0,
+ jsgraph()->Int32Constant(0));
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* cache_array_true1;
+ Node* etrue1;
+ {
+ // No properties to enumerate.
+ cache_array_true1 =
+ jsgraph()->HeapConstant(factory()->empty_fixed_array());
+ etrue1 = etrue0;
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* cache_array_false1;
+ Node* efalse1;
+ {
+ // Load the enumeration cache from the instance descriptors of {receiver}.
+ Node* receiver_map_descriptors = efalse1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapDescriptors()),
+ receiver_map, etrue0, if_false1);
+ Node* object_map_enum_cache = efalse1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForDescriptorArrayEnumCache()),
+ receiver_map_descriptors, efalse1, if_false1);
+ cache_array_false1 = efalse1 = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache()),
+ object_map_enum_cache, efalse1, if_false1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ etrue0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+ cache_array_true0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true1, cache_array_false1, if_true0);
+
+ cache_type_true0 = cache_type;
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* cache_array_false0;
+ Node* cache_length_false0;
+ Node* cache_type_false0;
+ Node* efalse0;
+ {
+ // FixedArray case.
+ cache_type_false0 = jsgraph()->OneConstant(); // Smi means slow check
+ cache_array_false0 = cache_type;
+ cache_length_false0 = efalse0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ cache_array_false0, effect, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ Node* cache_array =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true0, cache_array_false0, control);
+ Node* cache_length =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_length_true0, cache_length_false0, control);
+ cache_type =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_type_true0, cache_type_false0, control);
+
+ for (auto edge : node->use_edges()) {
+ Node* const use = edge.from();
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ Revisit(use);
+ } else {
+ if (NodeProperties::IsControlEdge(edge)) {
+ if (use->opcode() == IrOpcode::kIfSuccess) {
+ Replace(use, control);
+ } else if (use->opcode() == IrOpcode::kIfException) {
+ edge.UpdateTo(cache_type_true0);
+ continue;
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ DCHECK_EQ(IrOpcode::kProjection, use->opcode());
+ switch (ProjectionIndexOf(use->op())) {
+ case 0:
+ Replace(use, cache_type);
+ break;
+ case 1:
+ Replace(use, cache_array);
+ break;
+ case 2:
+ Replace(use, cache_length);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ use->Kill();
+ }
+ }
+ return NoChange(); // All uses were replaced already above.
+}
+
+
+Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* cache_array = NodeProperties::GetValueInput(node, 1);
+ Node* cache_type = NodeProperties::GetValueInput(node, 2);
+ Node* index = NodeProperties::GetValueInput(node, 3);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Load the next {key} from the {cache_array}.
+ Node* key = effect = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ cache_array, index, effect, control);
+
+ // Load the map of the {receiver}.
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+
+ // Check if the expected map still matches that of the {receiver}.
+ Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
+ receiver_map, cache_type);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0;
+ Node* vtrue0;
+ {
+ // Don't need filtering since expected map still matches that of the
+ // {receiver}.
+ etrue0 = effect;
+ vtrue0 = key;
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0;
+ Node* vfalse0;
+ {
+    // Check if the {cache_type} is zero, which indicates a proxy.
+ Node* check1 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
+ cache_type, jsgraph()->ZeroConstant());
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1;
+ Node* vtrue1;
+ {
+ // Don't do filtering for proxies.
+ etrue1 = effect;
+ vtrue1 = key;
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1;
+ Node* vfalse1;
+ {
+ // Filter the {key} to check if it's still a valid property of the
+ // {receiver} (does the ToName conversion implicitly).
+ vfalse1 = efalse1 = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kForInFilter, 2), receiver, key,
+ context, frame_state, effect, if_false1);
+ if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ ReplaceWithValue(node, node, effect, control);
+ node->ReplaceInput(0, vtrue0);
+ node->ReplaceInput(1, vfalse0);
+ node->ReplaceInput(2, control);
+ node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node,
+ common()->Phi(MachineRepresentation::kTagged, 2));
+ return Changed(node);
+}
+
+
+Reduction JSTypedLowering::ReduceJSForInStep(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSForInStep, node->opcode());
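+  // ForInStep(index) => Int32Add(index, 1)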
+ node->ReplaceInput(1, jsgraph()->Int32Constant(1));
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
+ return Changed(node);
+}
+
+
+Reduction JSTypedLowering::ReduceSelect(Node* node) {
+ DCHECK_EQ(IrOpcode::kSelect, node->opcode());
+ Node* const condition = NodeProperties::GetValueInput(node, 0);
+ Type* const condition_type = NodeProperties::GetType(condition);
+ Node* const vtrue = NodeProperties::GetValueInput(node, 1);
+ Type* const vtrue_type = NodeProperties::GetType(vtrue);
+ Node* const vfalse = NodeProperties::GetValueInput(node, 2);
+ Type* const vfalse_type = NodeProperties::GetType(vfalse);
+ if (condition_type->Is(true_type_)) {
+ // Select(condition:true, vtrue, vfalse) => vtrue
+ return Replace(vtrue);
+ }
+ if (condition_type->Is(false_type_)) {
+ // Select(condition:false, vtrue, vfalse) => vfalse
+ return Replace(vfalse);
+ }
+ if (vtrue_type->Is(true_type_) && vfalse_type->Is(false_type_)) {
+ // Select(condition, vtrue:true, vfalse:false) => condition
+ return Replace(condition);
+ }
+ if (vtrue_type->Is(false_type_) && vfalse_type->Is(true_type_)) {
+ // Select(condition, vtrue:false, vfalse:true) => BooleanNot(condition)
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
Reduction JSTypedLowering::Reduce(Node* node) {
// Check if the output type is a singleton. In that case we already know the
// result value and can simply replace the node if it's eliminable.
- if (NodeProperties::IsTyped(node) &&
- !IrOpcode::IsLeafOpcode(node->opcode()) &&
+ if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
node->op()->HasProperty(Operator::kEliminatable)) {
- Type* upper = NodeProperties::GetBounds(node).upper;
+ Type* upper = NodeProperties::GetType(node);
if (upper->IsConstant()) {
Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
- NodeProperties::ReplaceWithValue(node, replacement);
+ ReplaceWithValue(node, replacement);
return Changed(replacement);
} else if (upper->Is(Type::MinusZero())) {
Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
- NodeProperties::ReplaceWithValue(node, replacement);
+ ReplaceWithValue(node, replacement);
return Changed(replacement);
} else if (upper->Is(Type::NaN())) {
Node* replacement = jsgraph()->NaNConstant();
- NodeProperties::ReplaceWithValue(node, replacement);
+ ReplaceWithValue(node, replacement);
return Changed(replacement);
} else if (upper->Is(Type::Null())) {
Node* replacement = jsgraph()->NullConstant();
- NodeProperties::ReplaceWithValue(node, replacement);
+ ReplaceWithValue(node, replacement);
return Changed(replacement);
} else if (upper->Is(Type::PlainNumber()) && upper->Min() == upper->Max()) {
Node* replacement = jsgraph()->Constant(upper->Min());
- NodeProperties::ReplaceWithValue(node, replacement);
+ ReplaceWithValue(node, replacement);
return Changed(replacement);
} else if (upper->Is(Type::Undefined())) {
Node* replacement = jsgraph()->UndefinedConstant();
- NodeProperties::ReplaceWithValue(node, replacement);
+ ReplaceWithValue(node, replacement);
return Changed(replacement);
}
}
@@ -883,43 +2596,86 @@
case IrOpcode::kJSGreaterThanOrEqual:
return ReduceJSComparison(node);
case IrOpcode::kJSBitwiseOr:
- return ReduceJSBitwiseOr(node);
+ return ReduceInt32Binop(node, simplified()->NumberBitwiseOr());
case IrOpcode::kJSBitwiseXor:
- return ReduceInt32Binop(node, machine()->Word32Xor());
+ return ReduceInt32Binop(node, simplified()->NumberBitwiseXor());
case IrOpcode::kJSBitwiseAnd:
- return ReduceInt32Binop(node, machine()->Word32And());
+ return ReduceInt32Binop(node, simplified()->NumberBitwiseAnd());
case IrOpcode::kJSShiftLeft:
- return ReduceUI32Shift(node, kSigned, machine()->Word32Shl());
+ return ReduceUI32Shift(node, kSigned, simplified()->NumberShiftLeft());
case IrOpcode::kJSShiftRight:
- return ReduceUI32Shift(node, kSigned, machine()->Word32Sar());
+ return ReduceUI32Shift(node, kSigned, simplified()->NumberShiftRight());
case IrOpcode::kJSShiftRightLogical:
- return ReduceUI32Shift(node, kUnsigned, machine()->Word32Shr());
+ return ReduceUI32Shift(node, kUnsigned,
+ simplified()->NumberShiftRightLogical());
case IrOpcode::kJSAdd:
return ReduceJSAdd(node);
case IrOpcode::kJSSubtract:
return ReduceNumberBinop(node, simplified()->NumberSubtract());
case IrOpcode::kJSMultiply:
- return ReduceJSMultiply(node);
+ return ReduceNumberBinop(node, simplified()->NumberMultiply());
case IrOpcode::kJSDivide:
return ReduceNumberBinop(node, simplified()->NumberDivide());
case IrOpcode::kJSModulus:
- return ReduceNumberBinop(node, simplified()->NumberModulus());
- case IrOpcode::kJSUnaryNot:
- return ReduceJSUnaryNot(node);
+ return ReduceJSModulus(node);
case IrOpcode::kJSToBoolean:
return ReduceJSToBoolean(node);
case IrOpcode::kJSToNumber:
return ReduceJSToNumber(node);
case IrOpcode::kJSToString:
return ReduceJSToString(node);
+ case IrOpcode::kJSToObject:
+ return ReduceJSToObject(node);
+ case IrOpcode::kJSLoadNamed:
+ return ReduceJSLoadNamed(node);
case IrOpcode::kJSLoadProperty:
return ReduceJSLoadProperty(node);
case IrOpcode::kJSStoreProperty:
return ReduceJSStoreProperty(node);
+ case IrOpcode::kJSInstanceOf:
+ return ReduceJSInstanceOf(node);
case IrOpcode::kJSLoadContext:
return ReduceJSLoadContext(node);
case IrOpcode::kJSStoreContext:
return ReduceJSStoreContext(node);
+ case IrOpcode::kJSConvertReceiver:
+ return ReduceJSConvertReceiver(node);
+ case IrOpcode::kJSCreate:
+ return ReduceJSCreate(node);
+ case IrOpcode::kJSCreateArguments:
+ return ReduceJSCreateArguments(node);
+ case IrOpcode::kJSCreateArray:
+ return ReduceJSCreateArray(node);
+ case IrOpcode::kJSCreateClosure:
+ return ReduceJSCreateClosure(node);
+ case IrOpcode::kJSCreateIterResultObject:
+ return ReduceJSCreateIterResultObject(node);
+ case IrOpcode::kJSCreateLiteralArray:
+ return ReduceJSCreateLiteralArray(node);
+ case IrOpcode::kJSCreateLiteralObject:
+ return ReduceJSCreateLiteralObject(node);
+ case IrOpcode::kJSCreateFunctionContext:
+ return ReduceJSCreateFunctionContext(node);
+ case IrOpcode::kJSCreateWithContext:
+ return ReduceJSCreateWithContext(node);
+ case IrOpcode::kJSCreateCatchContext:
+ return ReduceJSCreateCatchContext(node);
+ case IrOpcode::kJSCreateBlockContext:
+ return ReduceJSCreateBlockContext(node);
+ case IrOpcode::kJSCallConstruct:
+ return ReduceJSCallConstruct(node);
+ case IrOpcode::kJSCallFunction:
+ return ReduceJSCallFunction(node);
+ case IrOpcode::kJSForInDone:
+ return ReduceJSForInDone(node);
+ case IrOpcode::kJSForInNext:
+ return ReduceJSForInNext(node);
+ case IrOpcode::kJSForInPrepare:
+ return ReduceJSForInPrepare(node);
+ case IrOpcode::kJSForInStep:
+ return ReduceJSForInStep(node);
+ case IrOpcode::kSelect:
+ return ReduceSelect(node);
default:
break;
}
@@ -927,42 +2683,6 @@
}
-Node* JSTypedLowering::ConvertToNumber(Node* input) {
- DCHECK(NodeProperties::GetBounds(input).upper->Is(Type::PlainPrimitive()));
- // Avoid inserting too many eager ToNumber() operations.
- Reduction const reduction = ReduceJSToNumberInput(input);
- if (reduction.Changed()) return reduction.replacement();
- Node* const conversion = graph()->NewNode(javascript()->ToNumber(), input,
- jsgraph()->NoContextConstant(),
- graph()->start(), graph()->start());
- InsertConversion(conversion);
- return conversion;
-}
-
-
-template <IrOpcode::Value kOpcode>
-Node* JSTypedLowering::FindConversion(Node* input) {
- size_t const input_id = input->id();
- if (input_id < conversions_.size()) {
- Node* const conversion = conversions_[input_id];
- if (conversion && conversion->opcode() == kOpcode) {
- return conversion;
- }
- }
- return nullptr;
-}
-
-
-void JSTypedLowering::InsertConversion(Node* conversion) {
- DCHECK(conversion->opcode() == IrOpcode::kJSToNumber);
- size_t const input_id = conversion->InputAt(0)->id();
- if (input_id >= conversions_.size()) {
- conversions_.resize(2 * input_id + 1);
- }
- conversions_[input_id] = conversion;
-}
-
-
Node* JSTypedLowering::Word32Shl(Node* const lhs, int32_t const rhs) {
if (rhs == 0) return lhs;
return graph()->NewNode(machine()->Word32Shl(), lhs,
@@ -970,12 +2690,148 @@
}
+// Helper that allocates a FixedArray holding argument values recorded in the
+// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
+Node* JSTypedLowering::AllocateArguments(Node* effect, Node* control,
+ Node* frame_state) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int argument_count = state_info.parameter_count() - 1; // Minus receiver.
+ if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+ // Prepare an iterator over argument values recorded in the frame state.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ StateValuesAccess parameters_access(parameters);
+ auto parameters_it = ++parameters_access.begin();
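+  // The initial increment skips the receiver, which is recorded as the first
+  // parameter in the frame state.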
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(argument_count, factory()->fixed_array_map());
+ for (int i = 0; i < argument_count; ++i, ++parameters_it) {
+ a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ }
+ return a.Finish();
+}
+
+
+// Helper that allocates a FixedArray holding the argument values recorded in
+// the given {frame_state}, starting at {start_index}. Serves as backing store
+// for rest parameters of JSCreateArguments nodes.
+Node* JSTypedLowering::AllocateRestArguments(Node* effect, Node* control,
+ Node* frame_state,
+ int start_index) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int argument_count = state_info.parameter_count() - 1; // Minus receiver.
+ int num_elements = std::max(0, argument_count - start_index);
+ if (num_elements == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+ // Prepare an iterator over argument values recorded in the frame state.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ StateValuesAccess parameters_access(parameters);
+ auto parameters_it = ++parameters_access.begin();
+
+ // Skip unused arguments.
+ for (int i = 0; i < start_index; i++) {
+ ++parameters_it;
+ }
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(num_elements, factory()->fixed_array_map());
+ for (int i = 0; i < num_elements; ++i, ++parameters_it) {
+ a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ }
+ return a.Finish();
+}
+
+
+// Helper that allocates a FixedArray serving as a parameter map for values
+// recorded in the given {frame_state}. Some elements map to slots within the
+// given {context}. Serves as backing store for JSCreateArguments nodes.
+Node* JSTypedLowering::AllocateAliasedArguments(
+ Node* effect, Node* control, Node* frame_state, Node* context,
+ Handle<SharedFunctionInfo> shared, bool* has_aliased_arguments) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int argument_count = state_info.parameter_count() - 1; // Minus receiver.
+ if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+  // If there is no aliasing, the arguments object elements are not special in
+  // any way; we can just return an unmapped backing store instead.
+ int parameter_count = shared->internal_formal_parameter_count();
+ if (parameter_count == 0) {
+ return AllocateArguments(effect, control, frame_state);
+ }
+
+ // Calculate number of argument values being aliased/mapped.
+ int mapped_count = Min(argument_count, parameter_count);
+ *has_aliased_arguments = true;
+
+ // Prepare an iterator over argument values recorded in the frame state.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ StateValuesAccess parameters_access(parameters);
+  auto parameters_it = ++parameters_access.begin();
+
+ // The unmapped argument values recorded in the frame state are stored yet
+ // another indirection away and then linked into the parameter map below,
+ // whereas mapped argument values are replaced with a hole instead.
+ AllocationBuilder aa(jsgraph(), effect, control);
+ aa.AllocateArray(argument_count, factory()->fixed_array_map());
+  for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
+ aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
+ }
+  for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
+    aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ }
+ Node* arguments = aa.Finish();
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), arguments, control);
+ a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
+ a.Store(AccessBuilder::ForFixedArraySlot(0), context);
+ a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
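+  // Store, for each mapped argument, the index of the context slot that
+  // aliases it; formal parameters are allocated in reverse order within the
+  // context.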
+ for (int i = 0; i < mapped_count; ++i) {
+ int idx = Context::MIN_CONTEXT_SLOTS + parameter_count - 1 - i;
+ a.Store(AccessBuilder::ForFixedArraySlot(i + 2), jsgraph()->Constant(idx));
+ }
+ return a.Finish();
+}
+
+
+Node* JSTypedLowering::AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind,
+ int capacity, PretenureFlag pretenure) {
+ DCHECK_LE(1, capacity);
+ DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
+
+ Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
+ ? factory()->fixed_double_array_map()
+ : factory()->fixed_array_map();
+ ElementAccess access = IsFastDoubleElementsKind(elements_kind)
+ ? AccessBuilder::ForFixedDoubleArrayElement()
+ : AccessBuilder::ForFixedArrayElement();
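+  // Double arrays encode the hole as a canonical NaN bit pattern; all other
+  // element kinds use the_hole sentinel object.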
+ Node* value =
+ IsFastDoubleElementsKind(elements_kind)
+ ? jsgraph()->Float64Constant(bit_cast<double>(kHoleNanInt64))
+ : jsgraph()->TheHoleConstant();
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(capacity, elements_map, pretenure);
+ for (int i = 0; i < capacity; ++i) {
+ Node* index = jsgraph()->Constant(i);
+ a.Store(access, index, value);
+ }
+ return a.Finish();
+}
+
+
Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
Graph* JSTypedLowering::graph() const { return jsgraph()->graph(); }
+Isolate* JSTypedLowering::isolate() const { return jsgraph()->isolate(); }
+
+
JSOperatorBuilder* JSTypedLowering::javascript() const {
return jsgraph()->javascript();
}
@@ -986,10 +2842,20 @@
}
+SimplifiedOperatorBuilder* JSTypedLowering::simplified() const {
+ return jsgraph()->simplified();
+}
+
+
MachineOperatorBuilder* JSTypedLowering::machine() const {
return jsgraph()->machine();
}
+
+CompilationDependencies* JSTypedLowering::dependencies() const {
+ return dependencies_;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index 838085e..68ce74e 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -5,11 +5,19 @@
#ifndef V8_COMPILER_JS_TYPED_LOWERING_H_
#define V8_COMPILER_JS_TYPED_LOWERING_H_
+#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/simplified-operator.h"
+#include "src/compiler/opcodes.h"
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class Factory;
+class TypeCache;
+
+
namespace compiler {
// Forward declarations.
@@ -17,65 +25,112 @@
class JSGraph;
class JSOperatorBuilder;
class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
// Lowers JS-level operators to simplified operators based on types.
-class JSTypedLowering FINAL : public Reducer {
+class JSTypedLowering final : public AdvancedReducer {
public:
- JSTypedLowering(JSGraph* jsgraph, Zone* zone);
- ~JSTypedLowering() FINAL {}
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ kDisableBinaryOpReduction = 1u << 1,
+ };
+ typedef base::Flags<Flag> Flags;
- Reduction Reduce(Node* node) FINAL;
+ JSTypedLowering(Editor* editor, CompilationDependencies* dependencies,
+ Flags flags, JSGraph* jsgraph, Zone* zone);
+ ~JSTypedLowering() final {}
+
+ Reduction Reduce(Node* node) final;
private:
friend class JSBinopReduction;
- Reduction ReplaceEagerly(Node* old, Node* node);
Reduction ReduceJSAdd(Node* node);
+ Reduction ReduceJSModulus(Node* node);
Reduction ReduceJSBitwiseOr(Node* node);
Reduction ReduceJSMultiply(Node* node);
Reduction ReduceJSComparison(Node* node);
+ Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
+ Reduction ReduceJSInstanceOf(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
Reduction ReduceJSEqual(Node* node, bool invert);
Reduction ReduceJSStrictEqual(Node* node, bool invert);
- Reduction ReduceJSUnaryNot(Node* node);
Reduction ReduceJSToBoolean(Node* node);
Reduction ReduceJSToNumberInput(Node* input);
Reduction ReduceJSToNumber(Node* node);
Reduction ReduceJSToStringInput(Node* input);
Reduction ReduceJSToString(Node* node);
+ Reduction ReduceJSToObject(Node* node);
+ Reduction ReduceJSConvertReceiver(Node* node);
+ Reduction ReduceJSCreate(Node* node);
+ Reduction ReduceJSCreateArguments(Node* node);
+ Reduction ReduceJSCreateArray(Node* node);
+ Reduction ReduceJSCreateClosure(Node* node);
+ Reduction ReduceJSCreateIterResultObject(Node* node);
+ Reduction ReduceJSCreateLiteralArray(Node* node);
+ Reduction ReduceJSCreateLiteralObject(Node* node);
+ Reduction ReduceJSCreateFunctionContext(Node* node);
+ Reduction ReduceJSCreateWithContext(Node* node);
+ Reduction ReduceJSCreateCatchContext(Node* node);
+ Reduction ReduceJSCreateBlockContext(Node* node);
+ Reduction ReduceJSCallConstruct(Node* node);
+ Reduction ReduceJSCallFunction(Node* node);
+ Reduction ReduceJSForInDone(Node* node);
+ Reduction ReduceJSForInNext(Node* node);
+ Reduction ReduceJSForInPrepare(Node* node);
+ Reduction ReduceJSForInStep(Node* node);
+ Reduction ReduceSelect(Node* node);
Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
const Operator* shift_op);
-
- Node* ConvertToNumber(Node* input);
- template <IrOpcode::Value>
- Node* FindConversion(Node* input);
- void InsertConversion(Node* conversion);
+ Reduction ReduceNewArray(Node* node, Node* length, int capacity,
+ Handle<AllocationSite> site);
Node* Word32Shl(Node* const lhs, int32_t const rhs);
+ Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
+ Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
+ int start_index);
+ Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
+ Node* context, Handle<SharedFunctionInfo>,
+ bool* has_aliased_arguments);
+ Node* AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind, int capacity,
+ PretenureFlag pretenure);
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ SimplifiedOperatorBuilder* simplified() const;
MachineOperatorBuilder* machine() const;
+ CompilationDependencies* dependencies() const;
+ Flags flags() const { return flags_; }
+ // Limits up to which context allocations are inlined.
+ static const int kFunctionContextAllocationLimit = 16;
+ static const int kBlockContextAllocationLimit = 16;
+
+ CompilationDependencies* dependencies_;
+ Flags flags_;
JSGraph* jsgraph_;
- SimplifiedOperatorBuilder simplified_;
- ZoneVector<Node*> conversions_; // Cache inserted JSToXXX() conversions.
- Type* zero_range_;
- Type* one_range_;
- Type* zero_thirtyone_range_;
Type* shifted_int32_ranges_[4];
+ Type* const true_type_;
+ Type* const false_type_;
+ Type* const the_hole_type_;
+ TypeCache const& type_cache_;
};
+DEFINE_OPERATORS_FOR_FLAGS(JSTypedLowering::Flags)
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/jump-threading.cc b/src/compiler/jump-threading.cc
index f0bb731..7b53b5c 100644
--- a/src/compiler/jump-threading.cc
+++ b/src/compiler/jump-threading.cc
@@ -9,10 +9,10 @@
namespace internal {
namespace compiler {
-typedef BasicBlock::RpoNumber RpoNumber;
-
-#define TRACE(x) \
- if (FLAG_trace_turbo_jt) PrintF x
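+// Wrapping the body in do { ... } while (false) makes TRACE usable as a
+// single statement, e.g. in unbraced if/else branches.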
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_jt) PrintF(__VA_ARGS__); \
+ } while (false)
struct JumpThreadingState {
bool forwarded;
@@ -31,19 +31,19 @@
RpoNumber to_to = result[to.ToInt()];
bool pop = true;
if (to == from) {
- TRACE((" xx %d\n", from.ToInt()));
+ TRACE(" xx %d\n", from.ToInt());
result[from.ToInt()] = from;
} else if (to_to == unvisited()) {
- TRACE((" fw %d -> %d (recurse)\n", from.ToInt(), to.ToInt()));
+ TRACE(" fw %d -> %d (recurse)\n", from.ToInt(), to.ToInt());
stack.push(to);
result[to.ToInt()] = onstack();
pop = false; // recurse.
} else if (to_to == onstack()) {
- TRACE((" fw %d -> %d (cycle)\n", from.ToInt(), to.ToInt()));
+ TRACE(" fw %d -> %d (cycle)\n", from.ToInt(), to.ToInt());
result[from.ToInt()] = to; // break the cycle.
forwarded = true;
} else {
- TRACE((" fw %d -> %d (forward)\n", from.ToInt(), to.ToInt()));
+ TRACE(" fw %d -> %d (forward)\n", from.ToInt(), to.ToInt());
result[from.ToInt()] = to_to; // forward the block.
forwarded = true;
}
@@ -70,36 +70,32 @@
while (!state.stack.empty()) {
InstructionBlock* block = code->InstructionBlockAt(state.stack.top());
// Process the instructions in a block up to a non-empty instruction.
- TRACE(("jt [%d] B%d RPO%d\n", static_cast<int>(stack.size()),
- block->id().ToInt(), block->rpo_number().ToInt()));
+ TRACE("jt [%d] B%d\n", static_cast<int>(stack.size()),
+ block->rpo_number().ToInt());
bool fallthru = true;
RpoNumber fw = block->rpo_number();
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
- if (instr->IsGapMoves() && GapInstruction::cast(instr)->IsRedundant()) {
- // skip redundant gap moves.
- TRACE((" nop gap\n"));
- continue;
- } else if (instr->IsSourcePosition()) {
- // skip source positions.
- TRACE((" src pos\n"));
- continue;
+ if (!instr->AreMovesRedundant()) {
+      // can't skip instructions with non-redundant moves.
+ TRACE(" parallel move\n");
+ fallthru = false;
} else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
// can't skip instructions with flags continuations.
- TRACE((" flags\n"));
+ TRACE(" flags\n");
fallthru = false;
} else if (instr->IsNop()) {
// skip nops.
- TRACE((" nop\n"));
+ TRACE(" nop\n");
continue;
} else if (instr->arch_opcode() == kArchJmp) {
// try to forward the jump instruction.
- TRACE((" jmp\n"));
+ TRACE(" jmp\n");
fw = code->InputRpo(instr, 0);
fallthru = false;
} else {
// can't skip other instructions.
- TRACE((" other\n"));
+ TRACE(" other\n");
fallthru = false;
}
break;
@@ -120,14 +116,12 @@
if (FLAG_trace_turbo_jt) {
for (int i = 0; i < static_cast<int>(result.size()); i++) {
- TRACE(("RPO%d B%d ", i,
- code->InstructionBlockAt(RpoNumber::FromInt(i))->id().ToInt()));
+ TRACE("B%d ", i);
int to = result[i].ToInt();
if (i != to) {
- TRACE(("-> B%d\n",
- code->InstructionBlockAt(RpoNumber::FromInt(to))->id().ToInt()));
+ TRACE("-> B%d\n", to);
} else {
- TRACE(("\n"));
+ TRACE("\n");
}
}
}
@@ -140,7 +134,7 @@
InstructionSequence* code) {
if (!FLAG_turbo_jt) return;
- Zone local_zone(code->zone()->isolate());
+ Zone local_zone;
ZoneVector<bool> skip(static_cast<int>(result.size()), false, &local_zone);
// Skip empty blocks when the previous block doesn't fall through.
@@ -157,7 +151,7 @@
} else if (instr->arch_opcode() == kArchJmp) {
if (skip[block_num]) {
// Overwrite a redundant jump with a nop.
- TRACE(("jt-fw nop @%d\n", i));
+ TRACE("jt-fw nop @%d\n", i);
instr->OverwriteWithNop();
}
fallthru = false; // jumps don't fall through to the next block.
diff --git a/src/compiler/jump-threading.h b/src/compiler/jump-threading.h
index b801fec..fa74ee9 100644
--- a/src/compiler/jump-threading.h
+++ b/src/compiler/jump-threading.h
@@ -17,13 +17,12 @@
public:
// Compute the forwarding map of basic blocks to their ultimate destination.
// Returns {true} if there is at least one block that is forwarded.
- static bool ComputeForwarding(Zone* local_zone,
- ZoneVector<BasicBlock::RpoNumber>& result,
+ static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>& result,
InstructionSequence* code);
// Rewrite the instructions to forward jumps and branches.
// May also negate some branches.
- static void ApplyForwarding(ZoneVector<BasicBlock::RpoNumber>& forwarding,
+ static void ApplyForwarding(ZoneVector<RpoNumber>& forwarding,
InstructionSequence* code);
};
diff --git a/src/compiler/linkage-impl.h b/src/compiler/linkage-impl.h
deleted file mode 100644
index c13bd74..0000000
--- a/src/compiler/linkage-impl.h
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_LINKAGE_IMPL_H_
-#define V8_COMPILER_LINKAGE_IMPL_H_
-
-#include "src/code-stubs.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// TODO(titzer): replace uses of int with size_t in LinkageHelper.
-template <typename LinkageTraits>
-class LinkageHelper {
- public:
- static const RegList kNoCalleeSaved = 0;
-
- static void AddReturnLocations(LocationSignature::Builder* locations) {
- DCHECK(locations->return_count_ <= 2);
- if (locations->return_count_ > 0) {
- locations->AddReturn(regloc(LinkageTraits::ReturnValueReg()));
- }
- if (locations->return_count_ > 1) {
- locations->AddReturn(regloc(LinkageTraits::ReturnValue2Reg()));
- }
- }
-
- // TODO(turbofan): cache call descriptors for JSFunction calls.
- static CallDescriptor* GetJSCallDescriptor(Zone* zone, int js_parameter_count,
- CallDescriptor::Flags flags) {
- const size_t return_count = 1;
- const size_t context_count = 1;
- const size_t parameter_count = js_parameter_count + context_count;
-
- LocationSignature::Builder locations(zone, return_count, parameter_count);
- MachineSignature::Builder types(zone, return_count, parameter_count);
-
- // Add returns.
- AddReturnLocations(&locations);
- for (size_t i = 0; i < return_count; i++) {
- types.AddReturn(kMachAnyTagged);
- }
-
- // All parameters to JS calls go on the stack.
- for (int i = 0; i < js_parameter_count; i++) {
- int spill_slot_index = i - js_parameter_count;
- locations.AddParam(stackloc(spill_slot_index));
- types.AddParam(kMachAnyTagged);
- }
- // Add context.
- locations.AddParam(regloc(LinkageTraits::ContextReg()));
- types.AddParam(kMachAnyTagged);
-
- // The target for JS function calls is the JSFunction object.
- MachineType target_type = kMachAnyTagged;
- LinkageLocation target_loc = regloc(LinkageTraits::JSCallFunctionReg());
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallJSFunction, // kind
- target_type, // target MachineType
- target_loc, // target location
- types.Build(), // machine_sig
- locations.Build(), // location_sig
- js_parameter_count, // js_parameter_count
- Operator::kNoProperties, // properties
- kNoCalleeSaved, // callee-saved
- flags, // flags
- "js-call");
- }
-
-
- // TODO(turbofan): cache call descriptors for runtime calls.
- static CallDescriptor* GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function_id, int js_parameter_count,
- Operator::Properties properties) {
- const size_t function_count = 1;
- const size_t num_args_count = 1;
- const size_t context_count = 1;
- const size_t parameter_count = function_count +
- static_cast<size_t>(js_parameter_count) +
- num_args_count + context_count;
-
- const Runtime::Function* function = Runtime::FunctionForId(function_id);
- const size_t return_count = static_cast<size_t>(function->result_size);
-
- LocationSignature::Builder locations(zone, return_count, parameter_count);
- MachineSignature::Builder types(zone, return_count, parameter_count);
-
- // Add returns.
- AddReturnLocations(&locations);
- for (size_t i = 0; i < return_count; i++) {
- types.AddReturn(kMachAnyTagged);
- }
-
- // All parameters to the runtime call go on the stack.
- for (int i = 0; i < js_parameter_count; i++) {
- locations.AddParam(stackloc(i - js_parameter_count));
- types.AddParam(kMachAnyTagged);
- }
- // Add runtime function itself.
- locations.AddParam(regloc(LinkageTraits::RuntimeCallFunctionReg()));
- types.AddParam(kMachAnyTagged);
-
- // Add runtime call argument count.
- locations.AddParam(regloc(LinkageTraits::RuntimeCallArgCountReg()));
- types.AddParam(kMachPtr);
-
- // Add context.
- locations.AddParam(regloc(LinkageTraits::ContextReg()));
- types.AddParam(kMachAnyTagged);
-
- CallDescriptor::Flags flags = Linkage::NeedsFrameState(function_id)
- ? CallDescriptor::kNeedsFrameState
- : CallDescriptor::kNoFlags;
-
- // The target for runtime calls is a code object.
- MachineType target_type = kMachAnyTagged;
- LinkageLocation target_loc = LinkageLocation::AnyRegister();
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallCodeObject, // kind
- target_type, // target MachineType
- target_loc, // target location
- types.Build(), // machine_sig
- locations.Build(), // location_sig
- js_parameter_count, // js_parameter_count
- properties, // properties
- kNoCalleeSaved, // callee-saved
- flags, // flags
- function->name); // debug name
- }
-
-
- // TODO(turbofan): cache call descriptors for code stub calls.
- static CallDescriptor* GetStubCallDescriptor(
- Zone* zone, const CallInterfaceDescriptor& descriptor,
- int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties) {
- const int register_parameter_count =
- descriptor.GetEnvironmentParameterCount();
- const int js_parameter_count =
- register_parameter_count + stack_parameter_count;
- const int context_count = 1;
- const size_t return_count = 1;
- const size_t parameter_count =
- static_cast<size_t>(js_parameter_count + context_count);
-
- LocationSignature::Builder locations(zone, return_count, parameter_count);
- MachineSignature::Builder types(zone, return_count, parameter_count);
-
- // Add return location.
- AddReturnLocations(&locations);
- types.AddReturn(kMachAnyTagged);
-
- // Add parameters in registers and on the stack.
- for (int i = 0; i < js_parameter_count; i++) {
- if (i < register_parameter_count) {
- // The first parameters go in registers.
- Register reg = descriptor.GetEnvironmentParameterRegister(i);
- locations.AddParam(regloc(reg));
- } else {
- // The rest of the parameters go on the stack.
- int stack_slot = i - register_parameter_count - stack_parameter_count;
- locations.AddParam(stackloc(stack_slot));
- }
- types.AddParam(kMachAnyTagged);
- }
- // Add context.
- locations.AddParam(regloc(LinkageTraits::ContextReg()));
- types.AddParam(kMachAnyTagged);
-
- // The target for stub calls is a code object.
- MachineType target_type = kMachAnyTagged;
- LinkageLocation target_loc = LinkageLocation::AnyRegister();
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallCodeObject, // kind
- target_type, // target MachineType
- target_loc, // target location
- types.Build(), // machine_sig
- locations.Build(), // location_sig
- js_parameter_count, // js_parameter_count
- properties, // properties
- kNoCalleeSaved, // callee-saved registers
- flags, // flags
- descriptor.DebugName(zone->isolate()));
- }
-
- static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* msig) {
- LocationSignature::Builder locations(zone, msig->return_count(),
- msig->parameter_count());
- // Add return location(s).
- AddReturnLocations(&locations);
-
- // Add register and/or stack parameter(s).
- const int parameter_count = static_cast<int>(msig->parameter_count());
- for (int i = 0; i < parameter_count; i++) {
- if (i < LinkageTraits::CRegisterParametersLength()) {
- locations.AddParam(regloc(LinkageTraits::CRegisterParameter(i)));
- } else {
- locations.AddParam(stackloc(-1 - i));
- }
- }
-
- // The target for C calls is always an address (i.e. machine pointer).
- MachineType target_type = kMachPtr;
- LinkageLocation target_loc = LinkageLocation::AnyRegister();
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallAddress, // kind
- target_type, // target MachineType
- target_loc, // target location
- msig, // machine_sig
- locations.Build(), // location_sig
- 0, // js_parameter_count
- Operator::kNoProperties, // properties
- LinkageTraits::CCalleeSaveRegisters(), CallDescriptor::kNoFlags,
- "c-call");
- }
-
- static LinkageLocation regloc(Register reg) {
- return LinkageLocation(Register::ToAllocationIndex(reg));
- }
-
- static LinkageLocation stackloc(int i) {
- DCHECK_LT(i, 0);
- return LinkageLocation(i);
- }
-};
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_LINKAGE_IMPL_H_
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index fc6b19e..2eef929 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -2,17 +2,55 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ast/scopes.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/frame.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node.h"
+#include "src/compiler/osr.h"
#include "src/compiler/pipeline.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+LinkageLocation regloc(Register reg) {
+ return LinkageLocation::ForRegister(reg.code());
+}
+
+
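+// Translates a parameter Representation (as provided by the call interface
+// descriptor) into the machine type used by the compiler.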
+MachineType reptyp(Representation representation) {
+ switch (representation.kind()) {
+ case Representation::kInteger8:
+ return MachineType::Int8();
+ case Representation::kUInteger8:
+ return MachineType::Uint8();
+ case Representation::kInteger16:
+ return MachineType::Int16();
+ case Representation::kUInteger16:
+ return MachineType::Uint16();
+ case Representation::kInteger32:
+ return MachineType::Int32();
+ case Representation::kSmi:
+ case Representation::kTagged:
+ case Representation::kHeapObject:
+ return MachineType::AnyTagged();
+ case Representation::kDouble:
+ return MachineType::Float64();
+ case Representation::kExternal:
+ return MachineType::Pointer();
+ case Representation::kNone:
+ case Representation::kNumRepresentations:
+ break;
+ }
+ UNREACHABLE();
+ return MachineType::None();
+}
+} // namespace
+
std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
switch (k) {
@@ -25,6 +63,9 @@
case CallDescriptor::kCallAddress:
os << "Addr";
break;
+ case CallDescriptor::kLazyBailout:
+ os << "LazyBail";
+ break;
}
return os;
}
@@ -33,226 +74,466 @@
std::ostream& operator<<(std::ostream& os, const CallDescriptor& d) {
// TODO(svenpanne) Output properties etc. and be less cryptic.
return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
- << "j" << d.JSParameterCount() << "i" << d.InputCount() << "f"
- << d.FrameStateCount();
+ << "s" << d.StackParameterCount() << "i" << d.InputCount() << "f"
+ << d.FrameStateCount() << "t" << d.SupportsTailCalls();
+}
+
+
+bool CallDescriptor::HasSameReturnLocationsAs(
+ const CallDescriptor* other) const {
+ if (ReturnCount() != other->ReturnCount()) return false;
+ for (size_t i = 0; i < ReturnCount(); ++i) {
+ if (GetReturnLocation(i) != other->GetReturnLocation(i)) return false;
+ }
+ return true;
+}
+
+
+bool CallDescriptor::CanTailCall(const Node* node,
+ int* stack_param_delta) const {
+ CallDescriptor const* other = OpParameter<CallDescriptor const*>(node);
+ size_t current_input = 0;
+ size_t other_input = 0;
+ *stack_param_delta = 0;
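+ // Walk the inputs of both descriptors in lockstep: every stack (non-register)
+ // input of {other} decrements the delta, every stack input of this descriptor
+ // increments it, leaving the net change in stack parameter slots.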
+ bool more_other = true;
+ bool more_this = true;
+ while (more_other || more_this) {
+ if (other_input < other->InputCount()) {
+ if (!other->GetInputLocation(other_input).IsRegister()) {
+ (*stack_param_delta)--;
+ }
+ } else {
+ more_other = false;
+ }
+ if (current_input < InputCount()) {
+ if (!GetInputLocation(current_input).IsRegister()) {
+ (*stack_param_delta)++;
+ }
+ } else {
+ more_this = false;
+ }
+ ++current_input;
+ ++other_input;
+ }
+ return HasSameReturnLocationsAs(OpParameter<CallDescriptor const*>(node));
}
CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
- if (info->function() != NULL) {
+ if (info->code_stub() != nullptr) {
+ // Use the code stub interface descriptor.
+ CodeStub* stub = info->code_stub();
+ CallInterfaceDescriptor descriptor = stub->GetCallInterfaceDescriptor();
+ return GetStubCallDescriptor(
+ info->isolate(), zone, descriptor, stub->GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties);
+ }
+ if (info->has_literal()) {
// If we already have the function literal, use the number of parameters
// plus the receiver.
- return GetJSCallDescriptor(1 + info->function()->parameter_count(), zone,
+ return GetJSCallDescriptor(zone, info->is_osr(),
+ 1 + info->literal()->parameter_count(),
CallDescriptor::kNoFlags);
}
if (!info->closure().is_null()) {
// If we are compiling a JS function, use a JS call descriptor,
// plus the receiver.
SharedFunctionInfo* shared = info->closure()->shared();
- return GetJSCallDescriptor(1 + shared->formal_parameter_count(), zone,
+ return GetJSCallDescriptor(zone, info->is_osr(),
+ 1 + shared->internal_formal_parameter_count(),
CallDescriptor::kNoFlags);
}
- if (info->code_stub() != NULL) {
- // Use the code stub interface descriptor.
- CallInterfaceDescriptor descriptor =
- info->code_stub()->GetCallInterfaceDescriptor();
- return GetStubCallDescriptor(descriptor, 0, CallDescriptor::kNoFlags,
- Operator::kNoProperties, zone);
- }
- return NULL; // TODO(titzer): ?
-}
-
-
-FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame,
- int extra) const {
- if (frame->GetSpillSlotCount() > 0 || incoming_->IsJSFunctionCall() ||
- incoming_->kind() == CallDescriptor::kCallAddress) {
- int offset;
- int register_save_area_size = frame->GetRegisterSaveAreaSize();
- if (spill_slot >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- offset =
- -(spill_slot + 1) * kPointerSize - register_save_area_size + extra;
- } else {
- // Incoming parameter. Skip the return address.
- offset = -(spill_slot + 1) * kPointerSize + kFPOnStackSize +
- kPCOnStackSize + extra;
- }
- return FrameOffset::FromFramePointer(offset);
- } else {
- // No frame. Retrieve all parameters relative to stack pointer.
- DCHECK(spill_slot < 0); // Must be a parameter.
- int register_save_area_size = frame->GetRegisterSaveAreaSize();
- int offset = register_save_area_size - (spill_slot + 1) * kPointerSize +
- kPCOnStackSize + extra;
- return FrameOffset::FromStackPointer(offset);
- }
-}
-
-
-CallDescriptor* Linkage::GetJSCallDescriptor(
- int parameter_count, CallDescriptor::Flags flags) const {
- return GetJSCallDescriptor(parameter_count, zone_, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) const {
- return GetRuntimeCallDescriptor(function, parameter_count, properties, zone_);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties) const {
- return GetStubCallDescriptor(descriptor, stack_parameter_count, flags,
- properties, zone_);
+ return nullptr; // TODO(titzer): ?
}
// static
-bool Linkage::NeedsFrameState(Runtime::FunctionId function) {
- if (!FLAG_turbo_deoptimization) {
- return false;
- }
- // TODO(jarin) At the moment, we only add frame state for
- // few chosen runtime functions.
+int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
+ // Most runtime functions need a FrameState. A few chosen ones that we know
+ // not to call into arbitrary JavaScript, not to throw, and not to deoptimize
+ // are whitelisted here and can be called without a FrameState.
switch (function) {
- case Runtime::kApply:
- case Runtime::kArrayBufferNeuter:
- case Runtime::kArrayConcat:
- case Runtime::kBasicJSONStringify:
- case Runtime::kCheckExecutionState:
- case Runtime::kCollectStackTrace:
- case Runtime::kCompileLazy:
- case Runtime::kCompileOptimized:
- case Runtime::kCompileString:
- case Runtime::kCreateObjectLiteral:
- case Runtime::kDebugBreak:
- case Runtime::kDataViewSetInt8:
- case Runtime::kDataViewSetUint8:
- case Runtime::kDataViewSetInt16:
- case Runtime::kDataViewSetUint16:
- case Runtime::kDataViewSetInt32:
- case Runtime::kDataViewSetUint32:
- case Runtime::kDataViewSetFloat32:
- case Runtime::kDataViewSetFloat64:
- case Runtime::kDataViewGetInt8:
- case Runtime::kDataViewGetUint8:
- case Runtime::kDataViewGetInt16:
- case Runtime::kDataViewGetUint16:
- case Runtime::kDataViewGetInt32:
- case Runtime::kDataViewGetUint32:
- case Runtime::kDataViewGetFloat32:
- case Runtime::kDataViewGetFloat64:
- case Runtime::kDebugEvaluate:
- case Runtime::kDebugEvaluateGlobal:
- case Runtime::kDebugGetLoadedScripts:
- case Runtime::kDebugGetPropertyDetails:
- case Runtime::kDebugPromiseEvent:
- case Runtime::kDefineAccessorPropertyUnchecked:
- case Runtime::kDefineDataPropertyUnchecked:
- case Runtime::kDeleteProperty:
- case Runtime::kDeoptimizeFunction:
- case Runtime::kFunctionBindArguments:
- case Runtime::kGetDefaultReceiver:
- case Runtime::kGetFrameCount:
- case Runtime::kGetOwnProperty:
- case Runtime::kGetOwnPropertyNames:
- case Runtime::kGetPropertyNamesFast:
- case Runtime::kGetPrototype:
+ case Runtime::kAllocateInTargetSpace:
+ case Runtime::kCreateIterResultObject:
+ case Runtime::kDefineClassMethod: // TODO(jarin): Is it safe?
+ case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
+ case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
+ case Runtime::kFinalizeClassDefinition: // TODO(conradw): Is it safe?
+ case Runtime::kForInDone:
+ case Runtime::kForInStep:
+ case Runtime::kGetSuperConstructor:
+ case Runtime::kNewClosure:
+ case Runtime::kNewClosure_Tenured:
+ case Runtime::kNewFunctionContext:
+ case Runtime::kPushBlockContext:
+ case Runtime::kPushCatchContext:
+ case Runtime::kReThrow:
+ case Runtime::kStringCompare:
+ case Runtime::kStringEquals:
+ case Runtime::kToFastProperties: // TODO(jarin): Is it safe?
+ case Runtime::kTraceEnter:
+ case Runtime::kTraceExit:
+ return 0;
case Runtime::kInlineArguments:
- case Runtime::kInlineCallFunction:
- case Runtime::kInlineDateField:
+ case Runtime::kInlineArgumentsLength:
+ case Runtime::kInlineGetPrototype:
+ case Runtime::kInlineRegExpConstructResult:
case Runtime::kInlineRegExpExec:
- case Runtime::kInternalSetPrototype:
- case Runtime::kInterrupt:
- case Runtime::kIsPropertyEnumerable:
- case Runtime::kIsSloppyModeFunction:
- case Runtime::kLiveEditGatherCompileInfo:
- case Runtime::kLoadLookupSlot:
- case Runtime::kLoadLookupSlotNoReferenceError:
- case Runtime::kMaterializeRegExpLiteral:
- case Runtime::kNewObject:
- case Runtime::kNewObjectFromBound:
- case Runtime::kNewObjectWithAllocationSite:
- case Runtime::kObjectFreeze:
- case Runtime::kOwnKeys:
- case Runtime::kParseJson:
- case Runtime::kPrepareStep:
- case Runtime::kPreventExtensions:
- case Runtime::kPromiseRejectEvent:
- case Runtime::kPromiseRevokeReject:
- case Runtime::kRegExpInitializeAndCompile:
- case Runtime::kRegExpExecMultiple:
- case Runtime::kResolvePossiblyDirectEval:
- case Runtime::kRunMicrotasks:
- case Runtime::kSetPrototype:
- case Runtime::kSetScriptBreakPoint:
- case Runtime::kSparseJoinWithSeparator:
- case Runtime::kStackGuard:
- case Runtime::kStoreKeyedToSuper_Sloppy:
- case Runtime::kStoreKeyedToSuper_Strict:
- case Runtime::kStoreToSuper_Sloppy:
- case Runtime::kStoreToSuper_Strict:
- case Runtime::kStoreLookupSlot:
- case Runtime::kStringBuilderConcat:
- case Runtime::kStringBuilderJoin:
- case Runtime::kStringMatch:
- case Runtime::kStringReplaceGlobalRegExpWithString:
- case Runtime::kThrowNonMethodError:
- case Runtime::kThrowNotDateError:
- case Runtime::kThrowReferenceError:
- case Runtime::kThrowUnsupportedSuperError:
- case Runtime::kThrow:
- case Runtime::kTypedArraySetFastCases:
- case Runtime::kTypedArrayInitializeFromArrayLike:
-#ifdef V8_I18N_SUPPORT
- case Runtime::kGetImplFromInitializedIntlObject:
-#endif
- return true;
+ case Runtime::kInlineSubString:
+ case Runtime::kInlineToInteger:
+ case Runtime::kInlineToLength:
+ case Runtime::kInlineToName:
+ case Runtime::kInlineToNumber:
+ case Runtime::kInlineToObject:
+ case Runtime::kInlineToPrimitive_Number:
+ case Runtime::kInlineToPrimitive_String:
+ case Runtime::kInlineToPrimitive:
+ case Runtime::kInlineToString:
+ return 1;
+ case Runtime::kInlineCall:
+ case Runtime::kInlineTailCall:
+ case Runtime::kInlineDeoptimizeNow:
+ case Runtime::kInlineThrowNotDateError:
+ return 2;
default:
- return false;
+ break;
}
+
+ // Most inlined runtime functions (except the ones listed above) can be called
+ // without a FrameState or will be lowered by JSIntrinsicLowering internally.
+ const Runtime::Function* const f = Runtime::FunctionForId(function);
+ if (f->intrinsic_type == Runtime::IntrinsicType::INLINE) return 0;
+
+ return 1;
}
-//==============================================================================
-// Provide unimplemented methods on unsupported architectures, to at least link.
-//==============================================================================
-#if !V8_TURBOFAN_BACKEND
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
- CallDescriptor::Flags flags) {
- UNIMPLEMENTED();
- return NULL;
+bool CallDescriptor::UsesOnlyRegisters() const {
+ for (size_t i = 0; i < InputCount(); ++i) {
+ if (!GetInputLocation(i).IsRegister()) return false;
+ }
+ for (size_t i = 0; i < ReturnCount(); ++i) {
+ if (!GetReturnLocation(i).IsRegister()) return false;
+ }
+ return true;
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
- UNIMPLEMENTED();
- return NULL;
+ Zone* zone, Runtime::FunctionId function_id, int js_parameter_count,
+ Operator::Properties properties, CallDescriptor::Flags flags) {
+ const size_t function_count = 1;
+ const size_t num_args_count = 1;
+ const size_t context_count = 1;
+ const size_t parameter_count = function_count +
+ static_cast<size_t>(js_parameter_count) +
+ num_args_count + context_count;
+
+ const Runtime::Function* function = Runtime::FunctionForId(function_id);
+ const size_t return_count = static_cast<size_t>(function->result_size);
+
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+ MachineSignature::Builder types(zone, return_count, parameter_count);
+
+ // Add returns.
+ if (locations.return_count_ > 0) {
+ locations.AddReturn(regloc(kReturnRegister0));
+ }
+ if (locations.return_count_ > 1) {
+ locations.AddReturn(regloc(kReturnRegister1));
+ }
+ for (size_t i = 0; i < return_count; i++) {
+ types.AddReturn(MachineType::AnyTagged());
+ }
+
+ // All parameters to the runtime call go on the stack.
+ for (int i = 0; i < js_parameter_count; i++) {
+ locations.AddParam(
+ LinkageLocation::ForCallerFrameSlot(i - js_parameter_count));
+ types.AddParam(MachineType::AnyTagged());
+ }
+ // Add runtime function itself.
+ locations.AddParam(regloc(kRuntimeCallFunctionRegister));
+ types.AddParam(MachineType::AnyTagged());
+
+ // Add runtime call argument count.
+ locations.AddParam(regloc(kRuntimeCallArgCountRegister));
+ types.AddParam(MachineType::Pointer());
+
+ // Add context.
+ locations.AddParam(regloc(kContextRegister));
+ types.AddParam(MachineType::AnyTagged());
+
+ if (Linkage::FrameStateInputCount(function_id) == 0) {
+ flags = static_cast<CallDescriptor::Flags>(
+ flags & ~CallDescriptor::kNeedsFrameState);
+ }
+
+ // The target for runtime calls is a code object.
+ MachineType target_type = MachineType::AnyTagged();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ js_parameter_count, // stack_parameter_count
+ properties, // properties
+ kNoCalleeSaved, // callee-saved
+ kNoCalleeSaved, // callee-saved fp
+ flags, // flags
+ function->name); // debug name
}
+CallDescriptor* Linkage::GetLazyBailoutDescriptor(Zone* zone) {
+ const size_t return_count = 0;
+ const size_t parameter_count = 0;
+
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+ MachineSignature::Builder types(zone, return_count, parameter_count);
+
+ // The target is ignored, but we need to give some values here.
+ MachineType target_type = MachineType::AnyTagged();
+ LinkageLocation target_loc = regloc(kJSFunctionRegister);
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kLazyBailout, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ 0, // stack_parameter_count
+ Operator::kNoThrow, // properties
+ kNoCalleeSaved, // callee-saved
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kNeedsFrameState, // flags
+ "lazy-bailout");
+}
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int js_parameter_count,
+ CallDescriptor::Flags flags) {
+ const size_t return_count = 1;
+ const size_t context_count = 1;
+ const size_t new_target_count = 1;
+ const size_t num_args_count = 1;
+ const size_t parameter_count =
+ js_parameter_count + new_target_count + num_args_count + context_count;
+
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+ MachineSignature::Builder types(zone, return_count, parameter_count);
+
+ // All JS calls have exactly one return value.
+ locations.AddReturn(regloc(kReturnRegister0));
+ types.AddReturn(MachineType::AnyTagged());
+
+ // All parameters to JS calls go on the stack.
+ for (int i = 0; i < js_parameter_count; i++) {
+ int spill_slot_index = i - js_parameter_count;
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(spill_slot_index));
+ types.AddParam(MachineType::AnyTagged());
+ }
+
+ // Add JavaScript call new target value.
+ locations.AddParam(regloc(kJavaScriptCallNewTargetRegister));
+ types.AddParam(MachineType::AnyTagged());
+
+ // Add JavaScript call argument count.
+ locations.AddParam(regloc(kJavaScriptCallArgCountRegister));
+ types.AddParam(MachineType::Int32());
+
+ // Add context.
+ locations.AddParam(regloc(kContextRegister));
+ types.AddParam(MachineType::AnyTagged());
+
+ // The target for JS function calls is the JSFunction object.
+ MachineType target_type = MachineType::AnyTagged();
+ // TODO(titzer): When entering into an OSR function from unoptimized code,
+ // the JSFunction is not in a register, but it is on the stack in an
+ // unaddressable spill slot. We hack this in the OSR prologue. Fix.
+ LinkageLocation target_loc = regloc(kJSFunctionRegister);
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallJSFunction, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ js_parameter_count, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kCanUseRoots | // flags
+ flags, // flags
+ "js-call");
+}
+
+
+CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
+ MachineSignature::Builder types(zone, 0, 6);
+ LocationSignature::Builder locations(zone, 0, 6);
+
+ // Add registers for fixed parameters passed via interpreter dispatch.
+ STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
+ types.AddParam(MachineType::AnyTagged());
+ locations.AddParam(regloc(kInterpreterAccumulatorRegister));
+
+ STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
+ types.AddParam(MachineType::Pointer());
+ locations.AddParam(regloc(kInterpreterRegisterFileRegister));
+
+ STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
+ types.AddParam(MachineType::IntPtr());
+ locations.AddParam(regloc(kInterpreterBytecodeOffsetRegister));
+
+ STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
+ types.AddParam(MachineType::AnyTagged());
+ locations.AddParam(regloc(kInterpreterBytecodeArrayRegister));
+
+ STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
+ types.AddParam(MachineType::Pointer());
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
+ // TODO(rmcilroy): Make the context param the one spilled to the stack once
+ // Turbofan supports modified stack arguments in tail calls.
+ locations.AddParam(
+ LinkageLocation::ForCallerFrameSlot(kInterpreterDispatchTableSpillSlot));
+#else
+ locations.AddParam(regloc(kInterpreterDispatchTableRegister));
+#endif
+
+ STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
+ types.AddParam(MachineType::AnyTagged());
+ locations.AddParam(regloc(kContextRegister));
+
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ MachineType::None(), // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ 0, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp regs
+ CallDescriptor::kSupportsTailCalls | // flags
+ CallDescriptor::kCanUseRoots, // flags
+ "interpreter-dispatch");
+}
+
+
+// TODO(all): Add support for return representations/locations to
+// CallInterfaceDescriptor.
+// TODO(turbofan): cache call descriptors for code stub calls.
CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties,
- Zone* zone) {
- UNIMPLEMENTED();
- return NULL;
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties, MachineType return_type,
+ size_t return_count) {
+ const int register_parameter_count = descriptor.GetRegisterParameterCount();
+ const int js_parameter_count =
+ register_parameter_count + stack_parameter_count;
+ const int context_count = 1;
+ const size_t parameter_count =
+ static_cast<size_t>(js_parameter_count + context_count);
+
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+ MachineSignature::Builder types(zone, return_count, parameter_count);
+
+ // Add returns.
+ if (locations.return_count_ > 0) {
+ locations.AddReturn(regloc(kReturnRegister0));
+ }
+ if (locations.return_count_ > 1) {
+ locations.AddReturn(regloc(kReturnRegister1));
+ }
+ for (size_t i = 0; i < return_count; i++) {
+ types.AddReturn(return_type);
+ }
+
+ // Add parameters in registers and on the stack.
+ for (int i = 0; i < js_parameter_count; i++) {
+ if (i < register_parameter_count) {
+ // The first parameters go in registers.
+ Register reg = descriptor.GetRegisterParameter(i);
+ Representation rep =
+ RepresentationFromType(descriptor.GetParameterType(i));
+ locations.AddParam(regloc(reg));
+ types.AddParam(reptyp(rep));
+ } else {
+ // The rest of the parameters go on the stack.
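+ // Caller frame slots are negative, counting down from -1 to
+ // -stack_parameter_count.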
+ int stack_slot = i - register_parameter_count - stack_parameter_count;
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(stack_slot));
+ types.AddParam(MachineType::AnyTagged());
+ }
+ }
+ // Add context.
+ locations.AddParam(regloc(kContextRegister));
+ types.AddParam(MachineType::AnyTagged());
+
+ // The target for stub calls is a code object.
+ MachineType target_type = MachineType::AnyTagged();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ stack_parameter_count, // stack_parameter_count
+ properties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp
+ flags, // flags
+ descriptor.DebugName(isolate));
}
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
- UNIMPLEMENTED();
- return NULL;
+LinkageLocation Linkage::GetOsrValueLocation(int index) const {
+ CHECK(incoming_->IsJSFunctionCall());
+ int parameter_count = static_cast<int>(incoming_->JSParameterCount() - 1);
+ int first_stack_slot = OsrHelper::FirstStackSlotIndex(parameter_count);
+
+ if (index == kOsrContextSpillSlotIndex) {
+ // Context. Use the parameter location of the context spill slot.
+ // Parameter (arity + 2) is special for the context of the function frame.
+ // >> context_index = target + receiver + params + new_target + #args
+ int context_index = 1 + 1 + parameter_count + 1 + 1;
+ return incoming_->GetInputLocation(context_index);
+ } else if (index >= first_stack_slot) {
+ // Local variable stored in this (callee) stack.
+ int spill_index =
+ index - first_stack_slot + StandardFrameConstants::kFixedSlotCount;
+ return LinkageLocation::ForCalleeFrameSlot(spill_index);
+ } else {
+ // Parameter. Use the assigned location from the incoming call descriptor.
+ int parameter_index = 1 + index; // skip index 0, which is the target.
+ return incoming_->GetInputLocation(parameter_index);
+ }
}
-#endif // !V8_TURBOFAN_BACKEND
+
+
+bool Linkage::ParameterHasSecondaryLocation(int index) const {
+ if (incoming_->kind() != CallDescriptor::kCallJSFunction) return false;
+ LinkageLocation loc = GetParameterLocation(index);
+ return (loc == regloc(kJSFunctionRegister) ||
+ loc == regloc(kContextRegister));
}
+
+LinkageLocation Linkage::GetParameterSecondaryLocation(int index) const {
+ DCHECK(ParameterHasSecondaryLocation(index));
+ LinkageLocation loc = GetParameterLocation(index);
+
+ if (loc == regloc(kJSFunctionRegister)) {
+ return LinkageLocation::ForCalleeFrameSlot(Frame::kJSFunctionSlot);
+ } else {
+ DCHECK(loc == regloc(kContextRegister));
+ return LinkageLocation::ForCalleeFrameSlot(Frame::kContextSlot);
+ }
}
-} // namespace v8::internal::compiler
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 0ad0761..252f044 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -7,68 +7,175 @@
#include "src/base/flags.h"
#include "src/compiler/frame.h"
-#include "src/compiler/machine-type.h"
#include "src/compiler/operator.h"
+#include "src/frames.h"
+#include "src/machine-type.h"
+#include "src/runtime/runtime.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
class CallInterfaceDescriptor;
+class CompilationInfo;
namespace compiler {
+const RegList kNoCalleeSaved = 0;
+
+class Node;
+class OsrHelper;
+
// Describes the location for a parameter or a return value to a call.
class LinkageLocation {
public:
- explicit LinkageLocation(int location) : location_(location) {}
+ bool operator==(const LinkageLocation& other) const {
+ return bit_field_ == other.bit_field_;
+ }
- static const int16_t ANY_REGISTER = 32767;
+ bool operator!=(const LinkageLocation& other) const {
+ return !(*this == other);
+ }
- static LinkageLocation AnyRegister() { return LinkageLocation(ANY_REGISTER); }
+ static LinkageLocation ForAnyRegister() {
+ return LinkageLocation(REGISTER, ANY_REGISTER);
+ }
+
+ static LinkageLocation ForRegister(int32_t reg) {
+ DCHECK(reg >= 0);
+ return LinkageLocation(REGISTER, reg);
+ }
+
+ static LinkageLocation ForCallerFrameSlot(int32_t slot) {
+ DCHECK(slot < 0);
+ return LinkageLocation(STACK_SLOT, slot);
+ }
+
+ static LinkageLocation ForCalleeFrameSlot(int32_t slot) {
+ // TODO(titzer): bailout instead of crashing here.
+ DCHECK(slot >= 0 && slot < LinkageLocation::MAX_STACK_SLOT);
+ return LinkageLocation(STACK_SLOT, slot);
+ }
+
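+ // The fixed frame header slots below are numbered relative to the caller
+ // PC slot, so the saved return address itself is callee frame slot 0.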
+ static LinkageLocation ForSavedCallerReturnAddress() {
+ return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
+ StandardFrameConstants::kCallerPCOffset) /
+ kPointerSize);
+ }
+
+ static LinkageLocation ForSavedCallerFramePtr() {
+ return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
+ StandardFrameConstants::kCallerFPOffset) /
+ kPointerSize);
+ }
+
+ static LinkageLocation ForSavedCallerConstantPool() {
+ DCHECK(V8_EMBEDDED_CONSTANT_POOL);
+ return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
+ StandardFrameConstants::kConstantPoolOffset) /
+ kPointerSize);
+ }
+
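+ // Adjusts a caller frame slot by the tail call's stack parameter delta;
+ // register locations are returned unchanged.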
+ static LinkageLocation ConvertToTailCallerLocation(
+ LinkageLocation caller_location, int stack_param_delta) {
+ if (!caller_location.IsRegister()) {
+ return LinkageLocation(STACK_SLOT,
+ caller_location.GetLocation() - stack_param_delta);
+ }
+ return caller_location;
+ }
private:
friend class CallDescriptor;
friend class OperandGenerator;
- int16_t location_; // >= 0 implies register, otherwise stack slot.
+
+ enum LocationType { REGISTER, STACK_SLOT };
+
+ class TypeField : public BitField<LocationType, 0, 1> {};
+ class LocationField : public BitField<int32_t, TypeField::kNext, 31> {};
+
+ static const int32_t ANY_REGISTER = -1;
+ static const int32_t MAX_STACK_SLOT = 32767;
+
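+ // Locations are encoded in a single 32-bit field: one type bit (register
+ // vs. stack slot) plus a 31-bit signed location index, which is
+ // sign-extended again by the arithmetic shift in GetLocation().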
+ LinkageLocation(LocationType type, int32_t location) {
+ bit_field_ = TypeField::encode(type) |
+ ((location << LocationField::kShift) & LocationField::kMask);
+ }
+
+ int32_t GetLocation() const {
+ return static_cast<int32_t>(bit_field_ & LocationField::kMask) >>
+ LocationField::kShift;
+ }
+
+ bool IsRegister() const { return TypeField::decode(bit_field_) == REGISTER; }
+ bool IsAnyRegister() const {
+ return IsRegister() && GetLocation() == ANY_REGISTER;
+ }
+ bool IsCallerFrameSlot() const { return !IsRegister() && GetLocation() < 0; }
+ bool IsCalleeFrameSlot() const { return !IsRegister() && GetLocation() >= 0; }
+
+ int32_t AsRegister() const {
+ DCHECK(IsRegister());
+ return GetLocation();
+ }
+ int32_t AsCallerFrameSlot() const {
+ DCHECK(IsCallerFrameSlot());
+ return GetLocation();
+ }
+ int32_t AsCalleeFrameSlot() const {
+ DCHECK(IsCalleeFrameSlot());
+ return GetLocation();
+ }
+
+ int32_t bit_field_;
};
typedef Signature<LinkageLocation> LocationSignature;
// Describes a call to various parts of the compiler. Every call has the notion
// of a "target", which is the first input to the call.
-class CallDescriptor FINAL : public ZoneObject {
+class CallDescriptor final : public ZoneObject {
public:
// Describes the kind of this call, which determines the target.
enum Kind {
kCallCodeObject, // target is a Code object
kCallJSFunction, // target is a JSFunction object
- kCallAddress // target is a machine pointer
+ kCallAddress, // target is a machine pointer
+ kLazyBailout // the call is a no-op, used only for lazy bailout
};
enum Flag {
- // TODO(jarin) kLazyDeoptimization and kNeedsFrameState should be unified.
kNoFlags = 0u,
kNeedsFrameState = 1u << 0,
kPatchableCallSite = 1u << 1,
kNeedsNopAfterCall = 1u << 2,
+ kHasExceptionHandler = 1u << 3,
+ kHasLocalCatchHandler = 1u << 4,
+ kSupportsTailCalls = 1u << 5,
+ kCanUseRoots = 1u << 6,
+ // Indicates that the native stack should be used for a code object. This
+ // information is important for native calls on arm64.
+ kUseNativeStack = 1u << 7,
kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
};
typedef base::Flags<Flag> Flags;
CallDescriptor(Kind kind, MachineType target_type, LinkageLocation target_loc,
- MachineSignature* machine_sig, LocationSignature* location_sig,
- size_t js_param_count, Operator::Properties properties,
- RegList callee_saved_registers, Flags flags,
+ const MachineSignature* machine_sig,
+ LocationSignature* location_sig, size_t stack_param_count,
+ Operator::Properties properties,
+ RegList callee_saved_registers,
+ RegList callee_saved_fp_registers, Flags flags,
const char* debug_name = "")
: kind_(kind),
target_type_(target_type),
target_loc_(target_loc),
machine_sig_(machine_sig),
location_sig_(location_sig),
- js_param_count_(js_param_count),
+ stack_param_count_(stack_param_count),
properties_(properties),
callee_saved_registers_(callee_saved_registers),
+ callee_saved_fp_registers_(callee_saved_fp_registers),
flags_(flags),
debug_name_(debug_name) {
DCHECK(machine_sig->return_count() == location_sig->return_count());
@@ -78,15 +185,30 @@
// Returns the kind of this call.
Kind kind() const { return kind_; }
+ // Returns {true} if this descriptor is a call to a C function.
+ bool IsCFunctionCall() const { return kind_ == kCallAddress; }
+
// Returns {true} if this descriptor is a call to a JSFunction.
bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; }
+ bool RequiresFrameAsIncoming() const {
+ return IsCFunctionCall() || IsJSFunctionCall();
+ }
+
// The number of return values from this call.
size_t ReturnCount() const { return machine_sig_->return_count(); }
- // The number of JavaScript parameters to this call, including the receiver
- // object.
- size_t JSParameterCount() const { return js_param_count_; }
+ // The number of C parameters to this call.
+ size_t CParameterCount() const { return machine_sig_->parameter_count(); }
+
+ // The number of stack parameters to the call.
+ size_t StackParameterCount() const { return stack_param_count_; }
+
+ // The number of parameters to the JS function call.
+ size_t JSParameterCount() const {
+ DCHECK(IsJSFunctionCall());
+ return stack_param_count_;
+ }
// The total number of inputs to this call, which includes the target,
// receiver, context, etc.
@@ -98,6 +220,8 @@
Flags flags() const { return flags_; }
bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
+ bool SupportsTailCalls() const { return flags() & kSupportsTailCalls; }
+ bool UseNativeStack() const { return flags() & kUseNativeStack; }
LinkageLocation GetReturnLocation(size_t index) const {
return location_sig_->GetReturn(index);
@@ -125,8 +249,17 @@
// Get the callee-saved registers, if any, across this call.
RegList CalleeSavedRegisters() const { return callee_saved_registers_; }
+ // Get the callee-saved FP registers, if any, across this call.
+ RegList CalleeSavedFPRegisters() const { return callee_saved_fp_registers_; }
+
const char* debug_name() const { return debug_name_; }
+ bool UsesOnlyRegisters() const;
+
+ bool HasSameReturnLocationsAs(const CallDescriptor* other) const;
+
+ bool CanTailCall(const Node* call, int* stack_param_delta) const;
+
private:
friend class Linkage;
@@ -135,9 +268,10 @@
const LinkageLocation target_loc_;
const MachineSignature* const machine_sig_;
const LocationSignature* const location_sig_;
- const size_t js_param_count_;
+ const size_t stack_param_count_;
const Operator::Properties properties_;
const RegList callee_saved_registers_;
+ const RegList callee_saved_fp_registers_;
const Flags flags_;
const char* const debug_name_;
@@ -156,49 +290,49 @@
// Can be used to translate {arg_index} (i.e. index of the call node input) as
// well as {param_index} (i.e. as stored in parameter nodes) into an operator
// representing the architecture-specific location. The following call node
-// layouts are supported (where {n} is the number value inputs):
+// layouts are supported (where {n} is the number of value inputs):
//
// #0 #1 #2 #3 [...] #n
// Call[CodeStub] code, arg 1, arg 2, arg 3, [...], context
-// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], context
+// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], new, #arg, context
// Call[Runtime] CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
class Linkage : public ZoneObject {
public:
- Linkage(Zone* zone, CompilationInfo* info)
- : zone_(zone), incoming_(ComputeIncoming(zone, info)) {}
- Linkage(Zone* zone, CallDescriptor* incoming)
- : zone_(zone), incoming_(incoming) {}
+ explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
static CallDescriptor* ComputeIncoming(Zone* zone, CompilationInfo* info);
// The call descriptor for this compilation unit describes the locations
// of incoming parameters and the outgoing return value(s).
CallDescriptor* GetIncomingDescriptor() const { return incoming_; }
- CallDescriptor* GetJSCallDescriptor(int parameter_count,
- CallDescriptor::Flags flags) const;
- static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone,
+ static CallDescriptor* GetJSCallDescriptor(Zone* zone, bool is_osr,
+ int parameter_count,
CallDescriptor::Flags flags);
- CallDescriptor* GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties) const;
- static CallDescriptor* GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone);
- CallDescriptor* GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count = 0,
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags,
- Operator::Properties properties = Operator::kNoProperties) const;
+ static CallDescriptor* GetRuntimeCallDescriptor(
+ Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties, CallDescriptor::Flags flags);
+
+ static CallDescriptor* GetLazyBailoutDescriptor(Zone* zone);
+
static CallDescriptor* GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone);
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count, CallDescriptor::Flags flags,
+ Operator::Properties properties = Operator::kNoProperties,
+ MachineType return_type = MachineType::AnyTagged(),
+ size_t return_count = 1);
// Creates a call descriptor for simplified C calls that is appropriate
// for the host platform. This simplified calling convention only supports
// integers and pointers of one word size each, i.e. no floating point,
// structs, pointers to members, etc.
static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig);
+ const MachineSignature* sig);
+
+ // Creates a call descriptor for interpreter handler code stubs. These are not
+ // intended to be called directly but are instead dispatched to by the
+ // interpreter.
+ static CallDescriptor* GetInterpreterDispatchDescriptor(Zone* zone);
// Get the location of an (incoming) parameter to this function.
LinkageLocation GetParameterLocation(int index) const {
@@ -211,24 +345,54 @@
}
// Get the location where this function should place its return value.
- LinkageLocation GetReturnLocation() const {
- return incoming_->GetReturnLocation(0);
+ LinkageLocation GetReturnLocation(size_t index = 0) const {
+ return incoming_->GetReturnLocation(index);
}
// Get the machine type of this function's return value.
- MachineType GetReturnType() const { return incoming_->GetReturnType(0); }
+ MachineType GetReturnType(size_t index = 0) const {
+ return incoming_->GetReturnType(index);
+ }
- // Get the frame offset for a given spill slot. The location depends on the
- // calling convention and the specific frame layout, and may thus be
- // architecture-specific. Negative spill slots indicate arguments on the
- // caller's frame. The {extra} parameter indicates an additional offset from
- // the frame offset, e.g. to index into part of a double slot.
- FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0) const;
+ bool ParameterHasSecondaryLocation(int index) const;
+ LinkageLocation GetParameterSecondaryLocation(int index) const;
- static bool NeedsFrameState(Runtime::FunctionId function);
+ static int FrameStateInputCount(Runtime::FunctionId function);
+
+ // Get the location where an incoming OSR value is stored.
+ LinkageLocation GetOsrValueLocation(int index) const;
+
+ // A special {Parameter} index for JSCalls that represents the new target.
+ static int GetJSCallNewTargetParamIndex(int parameter_count) {
+ return parameter_count + 0; // Parameter (arity + 0) is special.
+ }
+
+ // A special {Parameter} index for JSCalls that represents the argument count.
+ static int GetJSCallArgCountParamIndex(int parameter_count) {
+ return parameter_count + 1; // Parameter (arity + 1) is special.
+ }
+
+ // A special {Parameter} index for JSCalls that represents the context.
+ static int GetJSCallContextParamIndex(int parameter_count) {
+ return parameter_count + 2; // Parameter (arity + 2) is special.
+ }
+
+ // A special {Parameter} index for JSCalls that represents the closure.
+ static const int kJSCallClosureParamIndex = -1;
+
+ // A special {OsrValue} index to indicate the context spill slot.
+ static const int kOsrContextSpillSlotIndex = -1;
+
+ // Special parameter indices used to pass fixed register data through
+ // interpreter dispatches.
+ static const int kInterpreterAccumulatorParameter = 0;
+ static const int kInterpreterRegisterFileParameter = 1;
+ static const int kInterpreterBytecodeOffsetParameter = 2;
+ static const int kInterpreterBytecodeArrayParameter = 3;
+ static const int kInterpreterDispatchTableParameter = 4;
+ static const int kInterpreterContextParameter = 5;
private:
- Zone* const zone_;
CallDescriptor* const incoming_;
DISALLOW_COPY_AND_ASSIGN(Linkage);
diff --git a/src/compiler/live-range-separator.cc b/src/compiler/live-range-separator.cc
new file mode 100644
index 0000000..980c944
--- /dev/null
+++ b/src/compiler/live-range-separator.cc
@@ -0,0 +1,159 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/live-range-separator.h"
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+ } while (false)
+
+
+namespace {
+
+
+void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
+ LifetimePosition first_cut, LifetimePosition last_cut) {
+ DCHECK(!range->IsSplinter());
+ // We can ignore ranges that live solely in deferred blocks.
+ // If a range ends right at the end of a deferred block, it is marked by
+ // the range builder as ending at gap start of the next block - since the
+ // end is a position where the variable isn't live. We need to take that
+ // into consideration.
+ LifetimePosition max_allowed_end = last_cut.NextFullStart();
+
+ if (first_cut <= range->Start() && max_allowed_end >= range->End()) {
+ return;
+ }
+
+ LifetimePosition start = Max(first_cut, range->Start());
+ LifetimePosition end = Min(last_cut, range->End());
+
+ if (start < end) {
+ // Ensure the original range has a spill range associated, before it gets
+ // splintered. Splinters will point to it. This way, when attempting to
+ // reuse spill slots of splinters, during allocation, we avoid clobbering
+ // such slots.
+ if (range->MayRequireSpillRange()) {
+ data->CreateSpillRangeForLiveRange(range);
+ }
+ if (range->splinter() == nullptr) {
+ TopLevelLiveRange *splinter =
+ data->NextLiveRange(range->representation());
+ DCHECK_NULL(data->live_ranges()[splinter->vreg()]);
+ data->live_ranges()[splinter->vreg()] = splinter;
+ range->SetSplinter(splinter);
+ }
+ Zone *zone = data->allocation_zone();
+ TRACE("creating splinter for range %d between %d and %d\n", range->vreg(),
+ start.ToInstructionIndex(), end.ToInstructionIndex());
+ range->Splinter(start, end, zone);
+ }
+}
+
+
+void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
+ const InstructionSequence *code = data->code();
+ UseInterval *interval = range->first_interval();
+
+ LifetimePosition first_cut = LifetimePosition::Invalid();
+ LifetimePosition last_cut = LifetimePosition::Invalid();
+
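+ // Walk the blocks covered by each use interval, tracking each maximal run
+ // of deferred blocks with {first_cut}/{last_cut}; a run is splintered off
+ // as soon as a non-deferred block ends it.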
+ while (interval != nullptr) {
+ UseInterval *next_interval = interval->next();
+ const InstructionBlock *first_block =
+ code->GetInstructionBlock(interval->FirstGapIndex());
+ const InstructionBlock *last_block =
+ code->GetInstructionBlock(interval->LastGapIndex());
+ int first_block_nr = first_block->rpo_number().ToInt();
+ int last_block_nr = last_block->rpo_number().ToInt();
+ for (int block_id = first_block_nr; block_id <= last_block_nr; ++block_id) {
+ const InstructionBlock *current_block =
+ code->InstructionBlockAt(RpoNumber::FromInt(block_id));
+ if (current_block->IsDeferred()) {
+ if (!first_cut.IsValid()) {
+ first_cut = LifetimePosition::GapFromInstructionIndex(
+ current_block->first_instruction_index());
+ }
+ last_cut = LifetimePosition::GapFromInstructionIndex(
+ current_block->last_instruction_index());
+ } else {
+ if (first_cut.IsValid()) {
+ CreateSplinter(range, data, first_cut, last_cut);
+ first_cut = LifetimePosition::Invalid();
+ last_cut = LifetimePosition::Invalid();
+ }
+ }
+ }
+ interval = next_interval;
+ }
+ // When the range ends in deferred blocks, first_cut will be valid here.
+ // Splinter from there to the last instruction that was in a deferred block.
+ if (first_cut.IsValid()) {
+ CreateSplinter(range, data, first_cut, last_cut);
+ }
+}
+} // namespace
+
+
+void LiveRangeSeparator::Splinter() {
+ size_t virt_reg_count = data()->live_ranges().size();
+ for (size_t vreg = 0; vreg < virt_reg_count; ++vreg) {
+ TopLevelLiveRange *range = data()->live_ranges()[vreg];
+ if (range == nullptr || range->IsEmpty() || range->IsSplinter()) {
+ continue;
+ }
+ int first_instr = range->first_interval()->FirstGapIndex();
+ if (!data()->code()->GetInstructionBlock(first_instr)->IsDeferred()) {
+ SplinterLiveRange(range, data());
+ }
+ }
+}
+
+
+void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
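+ // A splintered range qualifies when none of its remaining children is
+ // spilled or has a use requiring a stack slot; any spilling then happens
+ // only in the splinter, which lies entirely in deferred blocks.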
+ for (TopLevelLiveRange *top : data()->live_ranges()) {
+ if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr) {
+ continue;
+ }
+
+ LiveRange *child = top;
+ for (; child != nullptr; child = child->next()) {
+ if (child->spilled() ||
+ child->NextSlotPosition(child->Start()) != nullptr) {
+ break;
+ }
+ }
+ if (child == nullptr) top->MarkSpilledInDeferredBlock();
+ }
+}
+
+
+void LiveRangeMerger::Merge() {
+ MarkRangesSpilledInDeferredBlocks();
+
+ int live_range_count = static_cast<int>(data()->live_ranges().size());
+ for (int i = 0; i < live_range_count; ++i) {
+ TopLevelLiveRange *range = data()->live_ranges()[i];
+ if (range == nullptr || range->IsEmpty() || !range->IsSplinter()) {
+ continue;
+ }
+ TopLevelLiveRange *splinter_parent = range->splintered_from();
+
+ int to_remove = range->vreg();
+ splinter_parent->Merge(range, data()->allocation_zone());
+ data()->live_ranges()[to_remove] = nullptr;
+ }
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/live-range-separator.h b/src/compiler/live-range-separator.h
new file mode 100644
index 0000000..57bc982
--- /dev/null
+++ b/src/compiler/live-range-separator.h
@@ -0,0 +1,65 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIVE_RANGE_SEPARATOR_H_
+#define V8_LIVE_RANGE_SEPARATOR_H_
+
+
+#include "src/zone.h"
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class RegisterAllocationData;
+
+
+// A pair of register allocation transformations: splintering and merging
+// live ranges.
+class LiveRangeSeparator final : public ZoneObject {
+ public:
+ LiveRangeSeparator(RegisterAllocationData* data, Zone* zone)
+ : data_(data), zone_(zone) {}
+
+ void Splinter();
+
+ private:
+ RegisterAllocationData* data() const { return data_; }
+ Zone* zone() const { return zone_; }
+
+ RegisterAllocationData* const data_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeSeparator);
+};
+
+
+class LiveRangeMerger final : public ZoneObject {
+ public:
+ LiveRangeMerger(RegisterAllocationData* data, Zone* zone)
+ : data_(data), zone_(zone) {}
+
+ void Merge();
+
+ private:
+ RegisterAllocationData* data() const { return data_; }
+ Zone* zone() const { return zone_; }
+
+ // Mark ranges that are spilled in deferred blocks but also cover
+ // non-deferred code.
+ // We do nothing special for ranges fully contained in deferred blocks,
+ // because they would "spill in deferred blocks" anyway.
+ void MarkRangesSpilledInDeferredBlocks();
+
+ RegisterAllocationData* const data_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeMerger);
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+#endif // V8_LIVE_RANGE_SEPARATOR_H_
diff --git a/src/compiler/liveness-analyzer.cc b/src/compiler/liveness-analyzer.cc
new file mode 100644
index 0000000..fe458b8
--- /dev/null
+++ b/src/compiler/liveness-analyzer.cc
@@ -0,0 +1,200 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/adapters.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/liveness-analyzer.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/state-values-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+LivenessAnalyzer::LivenessAnalyzer(size_t local_count, Zone* zone)
+ : zone_(zone), blocks_(zone), local_count_(local_count), queue_(zone) {}
+
+
+void LivenessAnalyzer::Print(std::ostream& os) {
+ for (auto block : blocks_) {
+ block->Print(os);
+ os << std::endl;
+ }
+}
+
+
+LivenessAnalyzerBlock* LivenessAnalyzer::NewBlock() {
+ LivenessAnalyzerBlock* result =
+ new (zone()->New(sizeof(LivenessAnalyzerBlock)))
+ LivenessAnalyzerBlock(blocks_.size(), local_count_, zone());
+ blocks_.push_back(result);
+ return result;
+}
+
+
+LivenessAnalyzerBlock* LivenessAnalyzer::NewBlock(
+ LivenessAnalyzerBlock* predecessor) {
+ LivenessAnalyzerBlock* result = NewBlock();
+ result->AddPredecessor(predecessor);
+ return result;
+}
+
+
+void LivenessAnalyzer::Queue(LivenessAnalyzerBlock* block) {
+ if (!block->IsQueued()) {
+ block->SetQueued();
+ queue_.push(block);
+ }
+}
+
+
+void LivenessAnalyzer::Run(NonLiveFrameStateSlotReplacer* replacer) {
+ if (local_count_ == 0) {
+ // No local variables => nothing to do.
+ return;
+ }
+
+ // Put all blocks into the queue.
+ DCHECK(queue_.empty());
+ for (auto block : blocks_) {
+ Queue(block);
+ }
+
+ // Compute the fix-point.
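+ // Each block is processed backwards through its entries to produce its
+ // live-in set, which is then unioned into every predecessor; predecessors
+ // whose live set changed are re-queued until nothing changes.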
+ BitVector working_area(static_cast<int>(local_count_), zone_);
+ while (!queue_.empty()) {
+ LivenessAnalyzerBlock* block = queue_.front();
+ queue_.pop();
+ block->Process(&working_area, nullptr);
+
+ for (auto i = block->pred_begin(); i != block->pred_end(); i++) {
+ if ((*i)->UpdateLive(&working_area)) {
+ Queue(*i);
+ }
+ }
+ }
+
+ // Update the frame states according to the liveness.
+ for (auto block : blocks_) {
+ block->Process(&working_area, replacer);
+ }
+}
+
+LivenessAnalyzerBlock::LivenessAnalyzerBlock(size_t id, size_t local_count,
+ Zone* zone)
+ : entries_(zone),
+ predecessors_(zone),
+ live_(local_count == 0 ? 1 : static_cast<int>(local_count), zone),
+ queued_(false),
+ id_(id) {}
+
+void LivenessAnalyzerBlock::Process(BitVector* result,
+ NonLiveFrameStateSlotReplacer* replacer) {
+ queued_ = false;
+
+ // Copy the bitvector to the target bit vector.
+ result->CopyFrom(live_);
+
+ for (auto entry : base::Reversed(entries_)) {
+ switch (entry.kind()) {
+ case Entry::kLookup:
+ result->Add(entry.var());
+ break;
+ case Entry::kBind:
+ result->Remove(entry.var());
+ break;
+ case Entry::kCheckpoint:
+ if (replacer != nullptr) {
+ replacer->ClearNonLiveFrameStateSlots(entry.node(), result);
+ }
+ break;
+ }
+ }
+}
+
+
+bool LivenessAnalyzerBlock::UpdateLive(BitVector* working_area) {
+ return live_.UnionIsChanged(*working_area);
+}
+
+
+void NonLiveFrameStateSlotReplacer::ClearNonLiveFrameStateSlots(
+ Node* frame_state, BitVector* liveness) {
+ DCHECK_EQ(frame_state->opcode(), IrOpcode::kFrameState);
+ Node* locals_state = frame_state->InputAt(1);
+ DCHECK_EQ(locals_state->opcode(), IrOpcode::kStateValues);
+ int count = static_cast<int>(StateValuesAccess(locals_state).size());
+ DCHECK_EQ(count == 0 ? 1 : count, liveness->length());
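+ // Rebuild the state-values node with dead slots replaced by the
+ // replacement node, and patch it into the frame state's locals input.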
+ for (int i = 0; i < count; i++) {
+ bool live = liveness->Contains(i) || permanently_live_.Contains(i);
+ if (!live || locals_state->InputAt(i) != replacement_node_) {
+ Node* new_values = ClearNonLiveStateValues(locals_state, liveness);
+ frame_state->ReplaceInput(1, new_values);
+ break;
+ }
+ }
+}
+
+
+Node* NonLiveFrameStateSlotReplacer::ClearNonLiveStateValues(
+ Node* values, BitVector* liveness) {
+ DCHECK(inputs_buffer_.empty());
+ for (StateValuesAccess::TypedNode node : StateValuesAccess(values)) {
+ // The index of the next variable is its future index in the inputs buffer,
+ // i.e., the buffer's size.
+ int var = static_cast<int>(inputs_buffer_.size());
+ bool live = liveness->Contains(var) || permanently_live_.Contains(var);
+ inputs_buffer_.push_back(live ? node.node : replacement_node_);
+ }
+ Node* result = state_values_cache()->GetNodeForValues(
+ inputs_buffer_.empty() ? nullptr : &(inputs_buffer_.front()),
+ inputs_buffer_.size());
+ inputs_buffer_.clear();
+ return result;
+}
+
+
+void LivenessAnalyzerBlock::Print(std::ostream& os) {
+ os << "Block " << id();
+ bool first = true;
+ for (LivenessAnalyzerBlock* pred : predecessors_) {
+ if (!first) {
+ os << ", ";
+ } else {
+ os << "; predecessors: ";
+ first = false;
+ }
+ os << pred->id();
+ }
+ os << std::endl;
+
+ for (auto entry : entries_) {
+ os << " ";
+ switch (entry.kind()) {
+ case Entry::kLookup:
+ os << "- Lookup " << entry.var() << std::endl;
+ break;
+ case Entry::kBind:
+ os << "- Bind " << entry.var() << std::endl;
+ break;
+ case Entry::kCheckpoint:
+ os << "- Checkpoint " << entry.node()->id() << std::endl;
+ break;
+ }
+ }
+
+ if (live_.length() > 0) {
+ os << " Live set: ";
+ for (int i = 0; i < live_.length(); i++) {
+ os << (live_.Contains(i) ? "L" : ".");
+ }
+ os << std::endl;
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
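The pieces above fit together as follows: a client records Bind/Lookup/Checkpoint entries per block in program order, then Run computes the backward fixed point and prunes each checkpoint's frame state. A minimal sketch of such a client, assuming a Zone, a StateValuesCache, a frame-state node with two local slots, and a {dead_marker} node are already set up (all of these names are invented for illustration):

  // Two locals; Process walks each block's entries in reverse, so the
  // Lookup below keeps var 1 alive across the checkpoint while var 0,
  // unused after the checkpoint, gets cleared.
  LivenessAnalyzer analyzer(2, zone);
  LivenessAnalyzerBlock* block = analyzer.NewBlock();
  block->Bind(0);                  // var 0 assigned
  block->Checkpoint(frame_state);  // deopt point to be pruned
  block->Lookup(1);                // var 1 used after the checkpoint

  NonLiveFrameStateSlotReplacer replacer(&state_values_cache, dead_marker,
                                         2 /* local_count */, zone);
  analyzer.Run(&replacer);
  // Slot 0 of {frame_state}'s locals is now {dead_marker}; slot 1 survives.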
diff --git a/src/compiler/liveness-analyzer.h b/src/compiler/liveness-analyzer.h
new file mode 100644
index 0000000..1e2f85b
--- /dev/null
+++ b/src/compiler/liveness-analyzer.h
@@ -0,0 +1,146 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LIVENESS_ANALYZER_H_
+#define V8_COMPILER_LIVENESS_ANALYZER_H_
+
+#include "src/bit-vector.h"
+#include "src/compiler/node.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LivenessAnalyzerBlock;
+class Node;
+class StateValuesCache;
+
+
+class NonLiveFrameStateSlotReplacer {
+ public:
+ void ClearNonLiveFrameStateSlots(Node* frame_state, BitVector* liveness);
+ NonLiveFrameStateSlotReplacer(StateValuesCache* state_values_cache,
+ Node* replacement, size_t local_count,
+ Zone* local_zone)
+ : replacement_node_(replacement),
+ state_values_cache_(state_values_cache),
+ local_zone_(local_zone),
+ permanently_live_(local_count == 0 ? 1 : static_cast<int>(local_count),
+ local_zone),
+ inputs_buffer_(local_zone) {}
+
+ void MarkPermanentlyLive(int var) { permanently_live_.Add(var); }
+
+ private:
+  Node* ClearNonLiveStateValues(Node* values, BitVector* liveness);
+
+ StateValuesCache* state_values_cache() { return state_values_cache_; }
+ Zone* local_zone() { return local_zone_; }
+
+ // Node that replaces dead values.
+ Node* replacement_node_;
+ // Reference to state values cache so that we can create state values
+ // nodes.
+ StateValuesCache* state_values_cache_;
+
+ Zone* local_zone_;
+ BitVector permanently_live_;
+ NodeVector inputs_buffer_;
+};
+
+
+class LivenessAnalyzer {
+ public:
+ LivenessAnalyzer(size_t local_count, Zone* zone);
+
+ LivenessAnalyzerBlock* NewBlock();
+ LivenessAnalyzerBlock* NewBlock(LivenessAnalyzerBlock* predecessor);
+
+  void Run(NonLiveFrameStateSlotReplacer* replacer);
+
+ Zone* zone() { return zone_; }
+
+ void Print(std::ostream& os);
+
+ size_t local_count() { return local_count_; }
+
+ private:
+ void Queue(LivenessAnalyzerBlock* block);
+
+ Zone* zone_;
+ ZoneDeque<LivenessAnalyzerBlock*> blocks_;
+ size_t local_count_;
+
+ ZoneQueue<LivenessAnalyzerBlock*> queue_;
+};
+
+
+class LivenessAnalyzerBlock {
+ public:
+ friend class LivenessAnalyzer;
+
+ void Lookup(int var) { entries_.push_back(Entry(Entry::kLookup, var)); }
+ void Bind(int var) { entries_.push_back(Entry(Entry::kBind, var)); }
+ void Checkpoint(Node* node) { entries_.push_back(Entry(node)); }
+ void AddPredecessor(LivenessAnalyzerBlock* b) { predecessors_.push_back(b); }
+
+ private:
+ class Entry {
+ public:
+ enum Kind { kBind, kLookup, kCheckpoint };
+
+ Kind kind() const { return kind_; }
+ Node* node() const {
+ DCHECK(kind() == kCheckpoint);
+ return node_;
+ }
+ int var() const {
+ DCHECK(kind() != kCheckpoint);
+ return var_;
+ }
+
+ explicit Entry(Node* node) : kind_(kCheckpoint), var_(-1), node_(node) {}
+ Entry(Kind kind, int var) : kind_(kind), var_(var), node_(nullptr) {
+ DCHECK(kind != kCheckpoint);
+ }
+
+ private:
+ Kind kind_;
+ int var_;
+ Node* node_;
+ };
+
+ LivenessAnalyzerBlock(size_t id, size_t local_count, Zone* zone);
+  void Process(BitVector* result, NonLiveFrameStateSlotReplacer* replacer);
+ bool UpdateLive(BitVector* working_area);
+
+ void SetQueued() { queued_ = true; }
+ bool IsQueued() { return queued_; }
+
+ ZoneDeque<LivenessAnalyzerBlock*>::const_iterator pred_begin() {
+ return predecessors_.begin();
+ }
+ ZoneDeque<LivenessAnalyzerBlock*>::const_iterator pred_end() {
+ return predecessors_.end();
+ }
+
+ size_t id() { return id_; }
+ void Print(std::ostream& os);
+
+ ZoneDeque<Entry> entries_;
+ ZoneDeque<LivenessAnalyzerBlock*> predecessors_;
+
+ BitVector live_;
+ bool queued_;
+
+ size_t id_;
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_COMPILER_LIVENESS_ANALYZER_H_
diff --git a/src/compiler/load-elimination.cc b/src/compiler/load-elimination.cc
index fe0714e..97f1ab0 100644
--- a/src/compiler/load-elimination.cc
+++ b/src/compiler/load-elimination.cc
@@ -4,7 +4,7 @@
#include "src/compiler/load-elimination.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
namespace v8 {
@@ -28,7 +28,7 @@
Reduction LoadElimination::ReduceLoadField(Node* node) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
FieldAccess const access = FieldAccessOf(node->op());
- Node* const object = NodeProperties::GetValueInput(node, 0);
+ Node* object = NodeProperties::GetValueInput(node, 0);
for (Node* effect = NodeProperties::GetEffectInput(node);;
effect = NodeProperties::GetEffectInput(effect)) {
switch (effect->opcode()) {
@@ -36,7 +36,7 @@
if (object == NodeProperties::GetValueInput(effect, 0) &&
access == FieldAccessOf(effect->op())) {
Node* const value = effect;
- NodeProperties::ReplaceWithValue(node, value);
+ ReplaceWithValue(node, value);
return Replace(value);
}
break;
@@ -45,7 +45,7 @@
if (access == FieldAccessOf(effect->op())) {
if (object == NodeProperties::GetValueInput(effect, 0)) {
Node* const value = NodeProperties::GetValueInput(effect, 1);
- NodeProperties::ReplaceWithValue(node, value);
+ ReplaceWithValue(node, value);
return Replace(value);
}
// TODO(turbofan): Alias analysis to the rescue?
@@ -53,11 +53,24 @@
}
break;
}
+ case IrOpcode::kBeginRegion:
case IrOpcode::kStoreBuffer:
case IrOpcode::kStoreElement: {
// These can never interfere with field loads.
break;
}
+ case IrOpcode::kFinishRegion: {
+ // "Look through" FinishRegion nodes to make LoadElimination capable
+ // of looking into atomic regions.
+ if (object == effect) object = NodeProperties::GetValueInput(effect, 0);
+ break;
+ }
+ case IrOpcode::kAllocate: {
+        // Allocations don't interfere with field loads. If we reach the
+        // allocation of {object} itself, no earlier store can exist.
+ if (object == effect) return NoChange();
+ break;
+ }
default: {
if (!effect->op()->HasProperty(Operator::kNoWrite) ||
effect->op()->EffectInputCount() != 1) {
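The two added cases sharpen the reducer's backward walk over the effect chain: FinishRegion is transparent for the loaded object, and reaching the object's own allocation proves no earlier store exists. A condensed sketch of the walk's shape — pseudocode with hypothetical helpers (Matches, StoredValue, RegionValue, MayWrite), not the V8 API:

  // Return a previously available value for the load, or nullptr once
  // the search has to give up.
  Node* FindPriorValue(Node* load, Node* object, FieldAccess access) {
    for (Node* effect = GetEffectInput(load);;
         effect = GetEffectInput(effect)) {
      switch (effect->opcode()) {
        case kLoadField:     // same object+field read earlier: reuse it
          if (Matches(effect, object, access)) return effect;
          break;
        case kStoreField:    // same object+field written: reuse the value
          if (Matches(effect, object, access)) return StoredValue(effect);
          break;
        case kFinishRegion:  // look through into the atomic region
          if (object == effect) object = RegionValue(effect);
          break;
        case kAllocate:      // {object}'s own allocation: nothing earlier
          if (object == effect) return nullptr;
          break;
        default:
          if (MayWrite(effect)) return nullptr;  // possible interference
      }
    }
  }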
diff --git a/src/compiler/load-elimination.h b/src/compiler/load-elimination.h
index 6917ce3..db87d9a 100644
--- a/src/compiler/load-elimination.h
+++ b/src/compiler/load-elimination.h
@@ -11,12 +11,12 @@
namespace internal {
namespace compiler {
-class LoadElimination FINAL : public Reducer {
+class LoadElimination final : public AdvancedReducer {
public:
- LoadElimination() {}
- ~LoadElimination() FINAL;
+ explicit LoadElimination(Editor* editor) : AdvancedReducer(editor) {}
+ ~LoadElimination() final;
- Reduction Reduce(Node* node) FINAL;
+ Reduction Reduce(Node* node) final;
private:
Reduction ReduceLoadField(Node* node);
diff --git a/src/compiler/loop-analysis.cc b/src/compiler/loop-analysis.cc
index e1b703e..d52c7c7 100644
--- a/src/compiler/loop-analysis.cc
+++ b/src/compiler/loop-analysis.cc
@@ -1,61 +1,27 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/graph.h"
#include "src/compiler/loop-analysis.h"
+
+#include "src/compiler/graph.h"
#include "src/compiler/node.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/node-properties.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
-typedef uint32_t LoopMarks;
-
-
-// TODO(titzer): don't assume entry edges have a particular index.
-// TODO(titzer): use a BitMatrix to generalize this algorithm.
-static const size_t kMaxLoops = 31;
-static const int kAssumedLoopEntryIndex = 0; // assume loops are entered here.
-static const LoopMarks kVisited = 1; // loop #0 is reserved.
-
+#define OFFSET(x) ((x)&0x1f)
+#define BIT(x) (1u << OFFSET(x))
+#define INDEX(x) ((x) >> 5)
// Temporary information for each node during marking.
struct NodeInfo {
Node* node;
NodeInfo* next; // link in chaining loop members
- LoopMarks forward; // accumulated marks in the forward direction
- LoopMarks backward; // accumulated marks in the backward direction
- LoopMarks loop_mark; // loop mark for header nodes; encodes loop_num
-
- bool MarkBackward(LoopMarks bw) {
- LoopMarks prev = backward;
- LoopMarks next = backward | bw;
- backward = next;
- return prev != next;
- }
-
- bool MarkForward(LoopMarks fw) {
- LoopMarks prev = forward;
- LoopMarks next = forward | fw;
- forward = next;
- return prev != next;
- }
-
- bool IsInLoop(size_t loop_num) {
- DCHECK(loop_num > 0 && loop_num <= 31);
- return forward & backward & (1 << loop_num);
- }
-
- bool IsLoopHeader() { return loop_mark != 0; }
- bool IsInAnyLoop() { return (forward & backward) > kVisited; }
-
- bool IsInHeaderForLoop(size_t loop_num) {
- DCHECK(loop_num > 0);
- return loop_mark == (kVisited | (1 << loop_num));
- }
};
@@ -68,9 +34,6 @@
};
-static const NodeInfo kEmptyNodeInfo = {nullptr, nullptr, 0, 0, 0};
-
-
// Encapsulation of the loop finding algorithm.
// -----------------------------------------------------------------------------
// Conceptually, the contents of a loop are those nodes that are "between" the
@@ -88,13 +51,18 @@
class LoopFinderImpl {
public:
LoopFinderImpl(Graph* graph, LoopTree* loop_tree, Zone* zone)
- : end_(graph->end()),
+ : zone_(zone),
+ end_(graph->end()),
queue_(zone),
queued_(graph, 2),
- info_(graph->NodeCount(), kEmptyNodeInfo, zone),
+ info_(graph->NodeCount(), {nullptr, nullptr}, zone),
loops_(zone),
+ loop_num_(graph->NodeCount(), -1, zone),
loop_tree_(loop_tree),
- loops_found_(0) {}
+ loops_found_(0),
+ width_(0),
+ backward_(nullptr),
+ forward_(nullptr) {}
void Run() {
PropagateBackward();
@@ -106,12 +74,15 @@
// Print out the results.
for (NodeInfo& ni : info_) {
if (ni.node == nullptr) continue;
- for (size_t i = 1; i <= loops_.size(); i++) {
- if (ni.IsInLoop(i)) {
+ for (int i = 1; i <= loops_found_; i++) {
+ int index = ni.node->id() * width_ + INDEX(i);
+ bool marked_forward = forward_[index] & BIT(i);
+ bool marked_backward = backward_[index] & BIT(i);
+ if (marked_forward && marked_backward) {
PrintF("X");
- } else if (ni.forward & (1 << i)) {
+ } else if (marked_forward) {
PrintF("/");
- } else if (ni.backward & (1 << i)) {
+ } else if (marked_backward) {
PrintF("\\");
} else {
PrintF(" ");
@@ -132,157 +103,246 @@
}
private:
+ Zone* zone_;
Node* end_;
NodeDeque queue_;
NodeMarker<bool> queued_;
ZoneVector<NodeInfo> info_;
ZoneVector<LoopInfo> loops_;
+ ZoneVector<int> loop_num_;
LoopTree* loop_tree_;
- size_t loops_found_;
+ int loops_found_;
+ int width_;
+ uint32_t* backward_;
+ uint32_t* forward_;
+
+ int num_nodes() {
+ return static_cast<int>(loop_tree_->node_to_loop_num_.size());
+ }
+
+ // Tb = Tb | (Fb - loop_filter)
+ bool PropagateBackwardMarks(Node* from, Node* to, int loop_filter) {
+ if (from == to) return false;
+ uint32_t* fp = &backward_[from->id() * width_];
+ uint32_t* tp = &backward_[to->id() * width_];
+ bool change = false;
+ for (int i = 0; i < width_; i++) {
+ uint32_t mask = i == INDEX(loop_filter) ? ~BIT(loop_filter) : 0xFFFFFFFF;
+ uint32_t prev = tp[i];
+ uint32_t next = prev | (fp[i] & mask);
+ tp[i] = next;
+ if (!change && (prev != next)) change = true;
+ }
+ return change;
+ }
+
+ // Tb = Tb | B
+ bool SetBackwardMark(Node* to, int loop_num) {
+ uint32_t* tp = &backward_[to->id() * width_ + INDEX(loop_num)];
+ uint32_t prev = tp[0];
+ uint32_t next = prev | BIT(loop_num);
+ tp[0] = next;
+ return next != prev;
+ }
+
+ // Tf = Tf | B
+ bool SetForwardMark(Node* to, int loop_num) {
+ uint32_t* tp = &forward_[to->id() * width_ + INDEX(loop_num)];
+ uint32_t prev = tp[0];
+ uint32_t next = prev | BIT(loop_num);
+ tp[0] = next;
+ return next != prev;
+ }
+
+ // Tf = Tf | (Ff & Tb)
+ bool PropagateForwardMarks(Node* from, Node* to) {
+ if (from == to) return false;
+ bool change = false;
+ int findex = from->id() * width_;
+ int tindex = to->id() * width_;
+ for (int i = 0; i < width_; i++) {
+ uint32_t marks = backward_[tindex + i] & forward_[findex + i];
+ uint32_t prev = forward_[tindex + i];
+ uint32_t next = prev | marks;
+ forward_[tindex + i] = next;
+ if (!change && (prev != next)) change = true;
+ }
+ return change;
+ }
+
+ bool IsInLoop(Node* node, int loop_num) {
+ int offset = node->id() * width_ + INDEX(loop_num);
+ return backward_[offset] & forward_[offset] & BIT(loop_num);
+ }
// Propagate marks backward from loop headers.
void PropagateBackward() {
- PropagateBackward(end_, kVisited);
+ ResizeBackwardMarks();
+ SetBackwardMark(end_, 0);
+ Queue(end_);
while (!queue_.empty()) {
Node* node = queue_.front();
+ info(node);
queue_.pop_front();
queued_.Set(node, false);
+ int loop_num = -1;
// Setup loop headers first.
if (node->opcode() == IrOpcode::kLoop) {
// found the loop node first.
- CreateLoopInfo(node);
- } else if (node->opcode() == IrOpcode::kPhi ||
- node->opcode() == IrOpcode::kEffectPhi) {
+ loop_num = CreateLoopInfo(node);
+ } else if (NodeProperties::IsPhi(node)) {
// found a phi first.
Node* merge = node->InputAt(node->InputCount() - 1);
- if (merge->opcode() == IrOpcode::kLoop) CreateLoopInfo(merge);
+ if (merge->opcode() == IrOpcode::kLoop) {
+ loop_num = CreateLoopInfo(merge);
+ }
}
- // Propagate reachability marks backwards from this node.
- NodeInfo& ni = info(node);
- if (ni.IsLoopHeader()) {
- // Handle edges from loop header nodes specially.
- for (int i = 0; i < node->InputCount(); i++) {
- if (i == kAssumedLoopEntryIndex) {
- // Don't propagate the loop mark backwards on the entry edge.
- PropagateBackward(node->InputAt(0),
- kVisited | (ni.backward & ~ni.loop_mark));
- } else {
- // Only propagate the loop mark on backedges.
- PropagateBackward(node->InputAt(i), ni.loop_mark);
- }
- }
- } else {
- // Propagate all loop marks backwards for a normal node.
- for (Node* const input : node->inputs()) {
- PropagateBackward(input, ni.backward);
+ // Propagate marks backwards from this node.
+ for (int i = 0; i < node->InputCount(); i++) {
+ Node* input = node->InputAt(i);
+ if (loop_num > 0 && i != kAssumedLoopEntryIndex) {
+ // Only propagate the loop mark on backedges.
+ if (SetBackwardMark(input, loop_num)) Queue(input);
+ } else {
+ // Entry or normal edge. Propagate all marks except loop_num.
+ if (PropagateBackwardMarks(node, input, loop_num)) Queue(input);
}
}
}
}
- // Make a new loop header for the given node.
- void CreateLoopInfo(Node* node) {
- NodeInfo& ni = info(node);
- if (ni.IsLoopHeader()) return; // loop already set up.
+ // Make a new loop if necessary for the given node.
+ int CreateLoopInfo(Node* node) {
+ int loop_num = LoopNum(node);
+ if (loop_num > 0) return loop_num;
- loops_found_++;
- size_t loop_num = loops_.size() + 1;
- CHECK(loops_found_ <= kMaxLoops); // TODO(titzer): don't crash.
+ loop_num = ++loops_found_;
+ if (INDEX(loop_num) >= width_) ResizeBackwardMarks();
+
// Create a new loop.
loops_.push_back({node, nullptr, nullptr, nullptr});
loop_tree_->NewLoop();
- LoopMarks loop_mark = kVisited | (1 << loop_num);
- ni.node = node;
- ni.loop_mark = loop_mark;
+ SetBackwardMark(node, loop_num);
+ loop_tree_->node_to_loop_num_[node->id()] = loop_num;
// Setup loop mark for phis attached to loop header.
for (Node* use : node->uses()) {
- if (use->opcode() == IrOpcode::kPhi ||
- use->opcode() == IrOpcode::kEffectPhi) {
- info(use).loop_mark = loop_mark;
+ if (NodeProperties::IsPhi(use)) {
+ info(use); // create the NodeInfo
+ SetBackwardMark(use, loop_num);
+ loop_tree_->node_to_loop_num_[use->id()] = loop_num;
}
}
+
+ return loop_num;
+ }
+
+ void ResizeBackwardMarks() {
+ int new_width = width_ + 1;
+ int max = num_nodes();
+ uint32_t* new_backward = zone_->NewArray<uint32_t>(new_width * max);
+ memset(new_backward, 0, new_width * max * sizeof(uint32_t));
+ if (width_ > 0) { // copy old matrix data.
+ for (int i = 0; i < max; i++) {
+ uint32_t* np = &new_backward[i * new_width];
+ uint32_t* op = &backward_[i * width_];
+ for (int j = 0; j < width_; j++) np[j] = op[j];
+ }
+ }
+ width_ = new_width;
+ backward_ = new_backward;
+ }
+
+ void ResizeForwardMarks() {
+ int max = num_nodes();
+ forward_ = zone_->NewArray<uint32_t>(width_ * max);
+ memset(forward_, 0, width_ * max * sizeof(uint32_t));
}
// Propagate marks forward from loops.
void PropagateForward() {
+ ResizeForwardMarks();
for (LoopInfo& li : loops_) {
- queued_.Set(li.header, true);
- queue_.push_back(li.header);
- NodeInfo& ni = info(li.header);
- ni.forward = ni.loop_mark;
+ SetForwardMark(li.header, LoopNum(li.header));
+ Queue(li.header);
}
// Propagate forward on paths that were backward reachable from backedges.
while (!queue_.empty()) {
Node* node = queue_.front();
queue_.pop_front();
queued_.Set(node, false);
- NodeInfo& ni = info(node);
for (Edge edge : node->use_edges()) {
Node* use = edge.from();
- NodeInfo& ui = info(use);
- if (IsBackedge(use, ui, edge)) continue; // skip backedges.
- LoopMarks both = ni.forward & ui.backward;
- if (ui.MarkForward(both) && !queued_.Get(use)) {
- queued_.Set(use, true);
- queue_.push_back(use);
+ if (!IsBackedge(use, edge)) {
+ if (PropagateForwardMarks(node, use)) Queue(use);
}
}
}
}
- bool IsBackedge(Node* use, NodeInfo& ui, Edge& edge) {
- // TODO(titzer): checking for backedges here is ugly.
- if (!ui.IsLoopHeader()) return false;
+ bool IsBackedge(Node* use, Edge& edge) {
+ if (LoopNum(use) <= 0) return false;
if (edge.index() == kAssumedLoopEntryIndex) return false;
- if (use->opcode() == IrOpcode::kPhi ||
- use->opcode() == IrOpcode::kEffectPhi) {
+ if (NodeProperties::IsPhi(use)) {
return !NodeProperties::IsControlEdge(edge);
}
return true;
}
+ int LoopNum(Node* node) { return loop_tree_->node_to_loop_num_[node->id()]; }
+
NodeInfo& info(Node* node) {
NodeInfo& i = info_[node->id()];
if (i.node == nullptr) i.node = node;
return i;
}
- void PropagateBackward(Node* node, LoopMarks marks) {
- if (info(node).MarkBackward(marks) && !queued_.Get(node)) {
+ void Queue(Node* node) {
+ if (!queued_.Get(node)) {
queue_.push_back(node);
queued_.Set(node, true);
}
}
void FinishLoopTree() {
- // Degenerate cases.
- if (loops_.size() == 0) return;
- if (loops_.size() == 1) return FinishSingleLoop();
+ DCHECK(loops_found_ == static_cast<int>(loops_.size()));
+ DCHECK(loops_found_ == static_cast<int>(loop_tree_->all_loops_.size()));
- for (size_t i = 1; i <= loops_.size(); i++) ConnectLoopTree(i);
+ // Degenerate cases.
+ if (loops_found_ == 0) return;
+ if (loops_found_ == 1) return FinishSingleLoop();
+
+ for (int i = 1; i <= loops_found_; i++) ConnectLoopTree(i);
size_t count = 0;
// Place the node into the innermost nested loop of which it is a member.
for (NodeInfo& ni : info_) {
- if (ni.node == nullptr || !ni.IsInAnyLoop()) continue;
+ if (ni.node == nullptr) continue;
LoopInfo* innermost = nullptr;
- size_t index = 0;
- for (size_t i = 1; i <= loops_.size(); i++) {
- if (ni.IsInLoop(i)) {
- LoopInfo* loop = &loops_[i - 1];
- if (innermost == nullptr ||
- loop->loop->depth_ > innermost->loop->depth_) {
- innermost = loop;
- index = i;
+ int innermost_index = 0;
+ int pos = ni.node->id() * width_;
+ // Search the marks word by word.
+ for (int i = 0; i < width_; i++) {
+ uint32_t marks = backward_[pos + i] & forward_[pos + i];
+ for (int j = 0; j < 32; j++) {
+ if (marks & (1u << j)) {
+ int loop_num = i * 32 + j;
+ if (loop_num == 0) continue;
+ LoopInfo* loop = &loops_[loop_num - 1];
+ if (innermost == nullptr ||
+ loop->loop->depth_ > innermost->loop->depth_) {
+ innermost = loop;
+ innermost_index = loop_num;
+ }
}
}
}
- if (ni.IsInHeaderForLoop(index)) {
+ if (innermost == nullptr) continue;
+ if (LoopNum(ni.node) == innermost_index) {
ni.next = innermost->header_list;
+      innermost->header_list = &ni;
} else {
@@ -301,18 +361,14 @@
// Handle the simpler case of a single loop (no checks for nesting necessary).
void FinishSingleLoop() {
- DCHECK(loops_.size() == 1);
- DCHECK(loop_tree_->all_loops_.size() == 1);
-
// Place nodes into the loop header and body.
LoopInfo* li = &loops_[0];
li->loop = &loop_tree_->all_loops_[0];
loop_tree_->SetParent(nullptr, li->loop);
size_t count = 0;
for (NodeInfo& ni : info_) {
- if (ni.node == nullptr || !ni.IsInAnyLoop()) continue;
- DCHECK(ni.IsInLoop(1));
- if (ni.IsInHeaderForLoop(1)) {
+ if (ni.node == nullptr || !IsInLoop(ni.node, 1)) continue;
+ if (LoopNum(ni.node) == 1) {
ni.next = li->header_list;
li->header_list = &ni;
} else {
@@ -330,25 +386,21 @@
// Recursively serialize the list of header nodes and body nodes
// so that nested loops occupy nested intervals.
void SerializeLoop(LoopTree::Loop* loop) {
- size_t loop_num = loop_tree_->LoopNum(loop);
+ int loop_num = loop_tree_->LoopNum(loop);
LoopInfo& li = loops_[loop_num - 1];
// Serialize the header.
loop->header_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
for (NodeInfo* ni = li.header_list; ni != nullptr; ni = ni->next) {
loop_tree_->loop_nodes_.push_back(ni->node);
- // TODO(titzer): lift loop count restriction.
- loop_tree_->node_to_loop_num_[ni->node->id()] =
- static_cast<uint8_t>(loop_num);
+ loop_tree_->node_to_loop_num_[ni->node->id()] = loop_num;
}
// Serialize the body.
loop->body_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
for (NodeInfo* ni = li.body_list; ni != nullptr; ni = ni->next) {
loop_tree_->loop_nodes_.push_back(ni->node);
- // TODO(titzer): lift loop count restriction.
- loop_tree_->node_to_loop_num_[ni->node->id()] =
- static_cast<uint8_t>(loop_num);
+ loop_tree_->node_to_loop_num_[ni->node->id()] = loop_num;
}
// Serialize nested loops.
@@ -358,15 +410,15 @@
}
// Connect the LoopTree loops to their parents recursively.
- LoopTree::Loop* ConnectLoopTree(size_t loop_num) {
+ LoopTree::Loop* ConnectLoopTree(int loop_num) {
LoopInfo& li = loops_[loop_num - 1];
if (li.loop != nullptr) return li.loop;
NodeInfo& ni = info(li.header);
LoopTree::Loop* parent = nullptr;
- for (size_t i = 1; i <= loops_.size(); i++) {
+ for (int i = 1; i <= loops_found_; i++) {
if (i == loop_num) continue;
- if (ni.IsInLoop(i)) {
+ if (IsInLoop(ni.node, i)) {
// recursively create potential parent loops first.
LoopTree::Loop* upper = ConnectLoopTree(i);
if (parent == nullptr || upper->depth_ > parent->depth_) {
@@ -406,6 +458,16 @@
return loop_tree;
}
+
+Node* LoopTree::HeaderNode(Loop* loop) {
+ Node* first = *HeaderNodes(loop).begin();
+ if (first->opcode() == IrOpcode::kLoop) return first;
+ DCHECK(IrOpcode::IsPhiOpcode(first->opcode()));
+ Node* header = NodeProperties::GetControlInput(first);
+ DCHECK_EQ(IrOpcode::kLoop, header->opcode());
+ return header;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
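The rewrite trades the old single-word LoopMarks (hard-capped at 31 loops) for a growable bit matrix: every node owns a row of width_ 32-bit words with one bit per loop, addressed via the INDEX/OFFSET/BIT macros. A worked instance of the indexing, assuming loop number 37 and width_ == 2:

  // INDEX(37)  == 37 >> 5   == 1   -> second word of the node's row
  // OFFSET(37) == 37 & 0x1f == 5   -> bit position inside that word
  // BIT(37)    == 1u << 5
  // so IsInLoop(node, 37) boils down to:
  int offset = node->id() * width_ + 1;  // row base + INDEX(37)
  bool in_loop_37 =
      (backward_[offset] & forward_[offset] & (1u << 5)) != 0;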
diff --git a/src/compiler/loop-analysis.h b/src/compiler/loop-analysis.h
index 8c8d19a..2ed5bc2 100644
--- a/src/compiler/loop-analysis.h
+++ b/src/compiler/loop-analysis.h
@@ -14,6 +14,9 @@
namespace internal {
namespace compiler {
+// TODO(titzer): don't assume entry edges have a particular index.
+static const int kAssumedLoopEntryIndex = 0; // assume loops are entered here.
+
class LoopFinderImpl;
typedef base::iterator_range<Node**> NodeRange;
@@ -25,7 +28,7 @@
: zone_(zone),
outer_loops_(zone),
all_loops_(zone),
- node_to_loop_num_(static_cast<int>(num_nodes), 0, zone),
+ node_to_loop_num_(static_cast<int>(num_nodes), -1, zone),
loop_nodes_(zone) {}
// Represents a loop in the tree of loops, including the header nodes,
@@ -37,6 +40,7 @@
size_t HeaderSize() const { return body_start_ - header_start_; }
size_t BodySize() const { return body_end_ - body_start_; }
size_t TotalSize() const { return body_end_ - header_start_; }
+ size_t depth() const { return static_cast<size_t>(depth_); }
private:
friend class LoopTree;
@@ -59,9 +63,8 @@
// Return the innermost nested loop, if any, that contains {node}.
Loop* ContainingLoop(Node* node) {
- if (node->id() >= static_cast<int>(node_to_loop_num_.size()))
- return nullptr;
- uint8_t num = node_to_loop_num_[node->id()];
+ if (node->id() >= node_to_loop_num_.size()) return nullptr;
+ int num = node_to_loop_num_[node->id()];
return num > 0 ? &all_loops_[num - 1] : nullptr;
}
@@ -88,12 +91,31 @@
&loop_nodes_[0] + loop->body_start_);
}
+ // Return the header control node for a loop.
+ Node* HeaderNode(Loop* loop);
+
// Return a range which can iterate over the body nodes of {loop}.
NodeRange BodyNodes(Loop* loop) {
return NodeRange(&loop_nodes_[0] + loop->body_start_,
&loop_nodes_[0] + loop->body_end_);
}
+ // Return a range which can iterate over the nodes of {loop}.
+ NodeRange LoopNodes(Loop* loop) {
+ return NodeRange(&loop_nodes_[0] + loop->header_start_,
+ &loop_nodes_[0] + loop->body_end_);
+ }
+
+ // Return the node that represents the control, i.e. the loop node itself.
+ Node* GetLoopControl(Loop* loop) {
+ // TODO(turbofan): make the loop control node always first?
+ for (Node* node : HeaderNodes(loop)) {
+ if (node->opcode() == IrOpcode::kLoop) return node;
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
+
private:
friend class LoopFinderImpl;
@@ -116,18 +138,17 @@
Zone* zone_;
ZoneVector<Loop*> outer_loops_;
ZoneVector<Loop> all_loops_;
- // TODO(titzer): lift loop count restriction.
- ZoneVector<uint8_t> node_to_loop_num_;
+ ZoneVector<int> node_to_loop_num_;
ZoneVector<Node*> loop_nodes_;
};
-
class LoopFinder {
public:
// Build a loop tree for the entire graph.
static LoopTree* BuildLoopTree(Graph* graph, Zone* temp_zone);
};
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/loop-peeling.cc b/src/compiler/loop-peeling.cc
new file mode 100644
index 0000000..b553a9f
--- /dev/null
+++ b/src/compiler/loop-peeling.cc
@@ -0,0 +1,334 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/loop-peeling.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/node-properties.h"
+#include "src/zone.h"
+
+// Loop peeling is an optimization that copies the body of a loop, creating
+// a new copy of the body called the "peeled iteration" that represents the
+// first iteration. Beginning with a loop as follows:
+
+// E
+// | A
+// | | (backedges)
+// | +---------------|---------------------------------+
+// | | +-------------|-------------------------------+ |
+// | | | | +--------+ | |
+// | | | | | +----+ | | |
+// | | | | | | | | | |
+// ( Loop )<-------- ( phiA ) | | | |
+// | | | | | |
+// ((======P=================U=======|=|=====)) | |
+// (( | | )) | |
+// (( X <---------------------+ | )) | |
+// (( | )) | |
+// (( body | )) | |
+// (( | )) | |
+// (( Y <-----------------------+ )) | |
+// (( )) | |
+// ((===K====L====M==========================)) | |
+// | | | | |
+// | | +-----------------------------------------+ |
+// | +------------------------------------------------+
+// |
+// exit
+
+// The body of the loop is duplicated so that all nodes considered "inside"
+// the loop (e.g. {P, U, X, Y, K, L, M}) have corresponding copies in the
+// peeled iteration (e.g. {P', U', X', Y', K', L', M'}). What were considered
+// backedges of the loop correspond to edges from the peeled iteration to
+// the main loop body, with multiple backedges requiring a merge.
+
+// Similarly, any exits from the loop body need to be merged with "exits"
+// from the peeled iteration, resulting in the graph as follows:
+
+// E
+// | A
+// | |
+// ((=====P'================U'===============))
+// (( ))
+// (( X'<-------------+ ))
+// (( | ))
+// (( peeled iteration | ))
+// (( | ))
+// (( Y'<-----------+ | ))
+// (( | | ))
+// ((===K'===L'====M'======|=|===============))
+// | | | | |
+// +--------+ +-+ +-+ | |
+// | | | | |
+// | Merge <------phi
+// | | |
+// | +-----+ |
+// | | | (backedges)
+// | | +---------------|---------------------------------+
+// | | | +-------------|-------------------------------+ |
+// | | | | | +--------+ | |
+// | | | | | | +----+ | | |
+// | | | | | | | | | | |
+// | ( Loop )<-------- ( phiA ) | | | |
+// | | | | | | |
+// | ((======P=================U=======|=|=====)) | |
+// | (( | | )) | |
+// | (( X <---------------------+ | )) | |
+// | (( | )) | |
+// | (( body | )) | |
+// | (( | )) | |
+// | (( Y <-----------------------+ )) | |
+// | (( )) | |
+// | ((===K====L====M==========================)) | |
+// | | | | | |
+// | | | +-----------------------------------------+ |
+// | | +------------------------------------------------+
+// | |
+// | |
+// +----+ +-+
+// | |
+// Merge
+// |
+// exit
+
+// Note that the boxes ((===)) above are not explicitly represented in the
+// graph, but are instead computed by the {LoopFinder}.
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct Peeling {
+ // Maps a node to its index in the {pairs} vector.
+ NodeMarker<size_t> node_map;
+ // The vector which contains the mapped nodes.
+ NodeVector* pairs;
+
+ Peeling(Graph* graph, Zone* tmp_zone, size_t max, NodeVector* p)
+ : node_map(graph, static_cast<uint32_t>(max)), pairs(p) {}
+
+ Node* map(Node* node) {
+ if (node_map.Get(node) == 0) return node;
+ return pairs->at(node_map.Get(node));
+ }
+
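+  // The value stored in {node_map} is 1 + the index of {original} in
+  // {pairs}, which is also the index of the {copy} pushed right after
+  // it; zero is reserved to mean "no copy".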
+ void Insert(Node* original, Node* copy) {
+ node_map.Set(original, 1 + pairs->size());
+ pairs->push_back(original);
+ pairs->push_back(copy);
+ }
+
+ void CopyNodes(Graph* graph, Zone* tmp_zone, Node* dead, NodeRange nodes) {
+ NodeVector inputs(tmp_zone);
+ // Copy all the nodes first.
+ for (Node* node : nodes) {
+ inputs.clear();
+ for (Node* input : node->inputs()) inputs.push_back(map(input));
+ Insert(node, graph->NewNode(node->op(), node->InputCount(), &inputs[0]));
+ }
+
+ // Fix remaining inputs of the copies.
+ for (Node* original : nodes) {
+ Node* copy = pairs->at(node_map.Get(original));
+ for (int i = 0; i < copy->InputCount(); i++) {
+ copy->ReplaceInput(i, map(original->InputAt(i)));
+ }
+ }
+ }
+
+ bool Marked(Node* node) { return node_map.Get(node) > 0; }
+};
+
+
+class PeeledIterationImpl : public PeeledIteration {
+ public:
+ NodeVector node_pairs_;
+ explicit PeeledIterationImpl(Zone* zone) : node_pairs_(zone) {}
+};
+
+
+Node* PeeledIteration::map(Node* node) {
+ // TODO(turbofan): we use a simple linear search, since the peeled iteration
+ // is really only used in testing.
+ PeeledIterationImpl* impl = static_cast<PeeledIterationImpl*>(this);
+ for (size_t i = 0; i < impl->node_pairs_.size(); i += 2) {
+ if (impl->node_pairs_[i] == node) return impl->node_pairs_[i + 1];
+ }
+ return node;
+}
+
+
+static void FindLoopExits(LoopTree* loop_tree, LoopTree::Loop* loop,
+ NodeVector& exits, NodeVector& rets) {
+ // Look for returns and if projections that are outside the loop but whose
+ // control input is inside the loop.
+ for (Node* node : loop_tree->LoopNodes(loop)) {
+ for (Node* use : node->uses()) {
+ if (!loop_tree->Contains(loop, use)) {
+ if (IrOpcode::IsIfProjectionOpcode(use->opcode())) {
+ // This is a branch from inside the loop to outside the loop.
+ exits.push_back(use);
+ } else if (use->opcode() == IrOpcode::kReturn &&
+ loop_tree->Contains(loop,
+ NodeProperties::GetControlInput(use))) {
+ // This is a return from inside the loop.
+ rets.push_back(use);
+ }
+ }
+ }
+ }
+}
+
+
+bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
+ Zone zone;
+ NodeVector exits(&zone);
+ NodeVector rets(&zone);
+ FindLoopExits(loop_tree, loop, exits, rets);
+ return exits.size() <= 1u;
+}
+
+
+PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
+ LoopTree* loop_tree, LoopTree::Loop* loop,
+ Zone* tmp_zone) {
+ //============================================================================
+ // Find the loop exit region to determine if this loop can be peeled.
+ //============================================================================
+ NodeVector exits(tmp_zone);
+ NodeVector rets(tmp_zone);
+ FindLoopExits(loop_tree, loop, exits, rets);
+
+ if (exits.size() != 1) return nullptr; // not peelable currently.
+
+ //============================================================================
+ // Construct the peeled iteration.
+ //============================================================================
+ PeeledIterationImpl* iter = new (tmp_zone) PeeledIterationImpl(tmp_zone);
+ size_t estimated_peeled_size =
+ 5 + (loop->TotalSize() + exits.size() + rets.size()) * 2;
+ Peeling peeling(graph, tmp_zone, estimated_peeled_size, &iter->node_pairs_);
+
+ Node* dead = graph->NewNode(common->Dead());
+
+ // Map the loop header nodes to their entry values.
+ for (Node* node : loop_tree->HeaderNodes(loop)) {
+ peeling.Insert(node, node->InputAt(kAssumedLoopEntryIndex));
+ }
+
+ // Copy all the nodes of loop body for the peeled iteration.
+ peeling.CopyNodes(graph, tmp_zone, dead, loop_tree->BodyNodes(loop));
+
+ //============================================================================
+ // Replace the entry to the loop with the output of the peeled iteration.
+ //============================================================================
+ Node* loop_node = loop_tree->GetLoopControl(loop);
+ Node* new_entry;
+ int backedges = loop_node->InputCount() - 1;
+ if (backedges > 1) {
+ // Multiple backedges from original loop, therefore multiple output edges
+ // from the peeled iteration.
+ NodeVector inputs(tmp_zone);
+ for (int i = 1; i < loop_node->InputCount(); i++) {
+ inputs.push_back(peeling.map(loop_node->InputAt(i)));
+ }
+ Node* merge =
+ graph->NewNode(common->Merge(backedges), backedges, &inputs[0]);
+
+ // Merge values from the multiple output edges of the peeled iteration.
+ for (Node* node : loop_tree->HeaderNodes(loop)) {
+ if (node->opcode() == IrOpcode::kLoop) continue; // already done.
+ inputs.clear();
+ for (int i = 0; i < backedges; i++) {
+ inputs.push_back(peeling.map(node->InputAt(1 + i)));
+ }
+ for (Node* input : inputs) {
+ if (input != inputs[0]) { // Non-redundant phi.
+ inputs.push_back(merge);
+ const Operator* op = common->ResizeMergeOrPhi(node->op(), backedges);
+ Node* phi = graph->NewNode(op, backedges + 1, &inputs[0]);
+ node->ReplaceInput(0, phi);
+ break;
+ }
+ }
+ }
+ new_entry = merge;
+ } else {
+ // Only one backedge, simply replace the input to loop with output of
+ // peeling.
+ for (Node* node : loop_tree->HeaderNodes(loop)) {
+ node->ReplaceInput(0, peeling.map(node->InputAt(0)));
+ }
+ new_entry = peeling.map(loop_node->InputAt(1));
+ }
+ loop_node->ReplaceInput(0, new_entry);
+
+ //============================================================================
+ // Duplicate the loop exit region and add a merge.
+ //============================================================================
+
+ // Currently we are limited to peeling loops with a single exit. The exit is
+ // the postdominator of the loop (ignoring returns).
+ Node* postdom = exits[0];
+ for (Node* node : rets) exits.push_back(node);
+ for (Node* use : postdom->uses()) {
+ if (NodeProperties::IsPhi(use)) exits.push_back(use);
+ }
+
+ NodeRange exit_range(&exits[0], &exits[0] + exits.size());
+ peeling.CopyNodes(graph, tmp_zone, dead, exit_range);
+
+ Node* merge = graph->NewNode(common->Merge(2), postdom, peeling.map(postdom));
+ postdom->ReplaceUses(merge);
+ merge->ReplaceInput(0, postdom); // input 0 overwritten by above line.
+
+ // Find and update all the edges into either the loop or exit region.
+ for (int i = 0; i < 2; i++) {
+ NodeRange range = i == 0 ? loop_tree->LoopNodes(loop) : exit_range;
+ ZoneVector<Edge> value_edges(tmp_zone);
+ ZoneVector<Edge> effect_edges(tmp_zone);
+
+ for (Node* node : range) {
+ // Gather value and effect edges from outside the region.
+ for (Edge edge : node->use_edges()) {
+ if (!peeling.Marked(edge.from())) {
+ // Edge from outside the loop into the region.
+ if (NodeProperties::IsValueEdge(edge) ||
+ NodeProperties::IsContextEdge(edge)) {
+ value_edges.push_back(edge);
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ effect_edges.push_back(edge);
+ } else {
+ // don't do anything for control edges.
+ // TODO(titzer): should update control edges to peeled?
+ }
+ }
+ }
+
+ // Update all the value and effect edges at once.
+ if (!value_edges.empty()) {
+ // TODO(titzer): machine type is wrong here.
+ Node* phi =
+ graph->NewNode(common->Phi(MachineRepresentation::kTagged, 2), node,
+ peeling.map(node), merge);
+ for (Edge edge : value_edges) edge.UpdateTo(phi);
+ value_edges.clear();
+ }
+ if (!effect_edges.empty()) {
+ Node* effect_phi = graph->NewNode(common->EffectPhi(2), node,
+ peeling.map(node), merge);
+ for (Edge edge : effect_edges) edge.UpdateTo(effect_phi);
+ effect_edges.clear();
+ }
+ }
+ }
+
+ return iter;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
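A sketch of how a client might drive the peeler end to end, using only the entry points introduced in this patch; the graph, common-operator builder, zone, and the node of interest are assumed to exist:

  LoopTree* loop_tree = LoopFinder::BuildLoopTree(graph, tmp_zone);
  LoopTree::Loop* loop = loop_tree->ContainingLoop(node);
  if (loop != nullptr && LoopPeeler::CanPeel(loop_tree, loop)) {
    PeeledIteration* iter =
        LoopPeeler::Peel(graph, common, loop_tree, loop, tmp_zone);
    if (iter != nullptr) {
      // Follow a body node into its first-iteration copy.
      Node* copy_in_peeled_iteration = iter->map(node);
    }
  }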
diff --git a/src/compiler/loop-peeling.h b/src/compiler/loop-peeling.h
new file mode 100644
index 0000000..ea963b0
--- /dev/null
+++ b/src/compiler/loop-peeling.h
@@ -0,0 +1,43 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LOOP_PEELING_H_
+#define V8_COMPILER_LOOP_PEELING_H_
+
+#include "src/compiler/loop-analysis.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Represents the output of peeling a loop, which is basically the mapping
+// from the body of the loop to the corresponding nodes in the peeled
+// iteration.
+class PeeledIteration : public ZoneObject {
+ public:
+ // Maps {node} to its corresponding copy in the peeled iteration, if
+ // the node was part of the body of the loop. Returns {node} otherwise.
+ Node* map(Node* node);
+
+ protected:
+ PeeledIteration() {}
+};
+
+class CommonOperatorBuilder;
+
+// Implements loop peeling.
+class LoopPeeler {
+ public:
+ static bool CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop);
+ static PeeledIteration* Peel(Graph* graph, CommonOperatorBuilder* common,
+ LoopTree* loop_tree, LoopTree::Loop* loop,
+ Zone* tmp_zone);
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_LOOP_PEELING_H_
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index c3e45a1..19ea062 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -75,7 +75,9 @@
Node* MachineOperatorReducer::Int32Sub(Node* lhs, Node* rhs) {
- return graph()->NewNode(machine()->Int32Sub(), lhs, rhs);
+ Node* const node = graph()->NewNode(machine()->Int32Sub(), lhs, rhs);
+ Reduction const reduction = ReduceInt32Sub(node);
+ return reduction.Changed() ? reduction.replacement() : node;
}
@@ -101,13 +103,19 @@
Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
- DCHECK_LT(0, divisor);
+ DCHECK_LT(0u, divisor);
+ // If the divisor is even, we can avoid using the expensive fixup by shifting
+ // the dividend upfront.
+ unsigned const shift = base::bits::CountTrailingZeros32(divisor);
+ dividend = Word32Shr(dividend, shift);
+ divisor >>= shift;
+ // Compute the magic number for the (shifted) divisor.
base::MagicNumbersForDivision<uint32_t> const mag =
- base::UnsignedDivisionByConstant(bit_cast<uint32_t>(divisor));
+ base::UnsignedDivisionByConstant(divisor, shift);
Node* quotient = graph()->NewNode(machine()->Uint32MulHigh(), dividend,
Uint32Constant(mag.multiplier));
if (mag.add) {
- DCHECK_LE(1, mag.shift);
+ DCHECK_LE(1u, mag.shift);
quotient = Word32Shr(
Int32Add(Word32Shr(Int32Sub(dividend, quotient), 1), quotient),
mag.shift - 1);
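Dividing out the even factor up front leaves a smaller odd divisor, which often admits a magic number that does not need the add/shr fixup path above. A standalone illustration (not V8 code) built on the well-known magic constant for 3:

  #include <stdint.h>

  // x / 12 == (x >> 2) / 3, and for every uint32_t n:
  //   n / 3 == (uint32_t)(((uint64_t)n * 0xAAAAAAABu) >> 33)
  uint32_t DivideBy12(uint32_t x) {
    uint32_t n = x >> 2;  // strip the even factor 4 from the divisor
    return (uint32_t)(((uint64_t)n * 0xAAAAAAABu) >> 33);  // n / 3
  }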
@@ -122,7 +130,7 @@
Reduction MachineOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kProjection:
- return ReduceProjection(OpParameter<size_t>(node), node->InputAt(0));
+ return ReduceProjection(ProjectionIndexOf(node->op()), node->InputAt(0));
case IrOpcode::kWord32And:
return ReduceWord32And(node);
case IrOpcode::kWord32Or:
@@ -152,29 +160,8 @@
}
return ReduceWord32Shifts(node);
}
- case IrOpcode::kWord32Sar: {
- Int32BinopMatcher m(node);
- if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
- if (m.IsFoldable()) { // K >> K => K
- return ReplaceInt32(m.left().Value() >> m.right().Value());
- }
- if (m.left().IsWord32Shl()) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.left().IsLoad()) {
- LoadRepresentation const rep =
- OpParameter<LoadRepresentation>(mleft.left().node());
- if (m.right().Is(24) && mleft.right().Is(24) && rep == kMachInt8) {
- // Load[kMachInt8] << 24 >> 24 => Load[kMachInt8]
- return Replace(mleft.left().node());
- }
- if (m.right().Is(16) && mleft.right().Is(16) && rep == kMachInt16) {
- // Load[kMachInt16] << 16 >> 16 => Load[kMachInt8]
- return Replace(mleft.left().node());
- }
- }
- }
- return ReduceWord32Shifts(node);
- }
+ case IrOpcode::kWord32Sar:
+ return ReduceWord32Sar(node);
case IrOpcode::kWord32Ror: {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x ror 0 => x
@@ -216,16 +203,8 @@
}
case IrOpcode::kInt32Add:
return ReduceInt32Add(node);
- case IrOpcode::kInt32Sub: {
- Int32BinopMatcher m(node);
- if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
- if (m.IsFoldable()) { // K - K => K
- return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) -
- static_cast<uint32_t>(m.right().Value()));
- }
- if (m.LeftEqualsRight()) return ReplaceInt32(0); // x - x => 0
- break;
- }
+ case IrOpcode::kInt32Sub:
+ return ReduceInt32Sub(node);
case IrOpcode::kInt32Mul: {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0
@@ -234,14 +213,14 @@
return ReplaceInt32(m.left().Value() * m.right().Value());
}
if (m.right().Is(-1)) { // x * -1 => 0 - x
- node->set_op(machine()->Int32Sub());
node->ReplaceInput(0, Int32Constant(0));
node->ReplaceInput(1, m.left().node());
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
return Changed(node);
}
if (m.right().IsPowerOf2()) { // x * 2^n => x << n
- node->set_op(machine()->Word32Shl());
node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
+ NodeProperties::ChangeOp(node, machine()->Word32Shl());
Reduction reduction = ReduceWord32Shl(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -359,9 +338,9 @@
case IrOpcode::kFloat64Mul: {
Float64BinopMatcher m(node);
if (m.right().Is(-1)) { // x * -1.0 => -0.0 - x
- node->set_op(machine()->Float64Sub());
node->ReplaceInput(0, Float64Constant(-0.0));
node->ReplaceInput(1, m.left().node());
+ NodeProperties::ChangeOp(node, machine()->Float64Sub());
return Changed(node);
}
if (m.right().Is(1)) return Replace(m.left().node()); // x * 1.0 => x
@@ -390,7 +369,7 @@
case IrOpcode::kFloat64Mod: {
Float64BinopMatcher m(node);
if (m.right().Is(0)) { // x % 0 => NaN
- return ReplaceFloat64(base::OS::nan_value());
+ return ReplaceFloat64(std::numeric_limits<double>::quiet_NaN());
}
if (m.right().IsNaN()) { // x % NaN => NaN
return Replace(m.right().node());
@@ -454,8 +433,16 @@
if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
+ case IrOpcode::kFloat64InsertLowWord32:
+ return ReduceFloat64InsertLowWord32(node);
+ case IrOpcode::kFloat64InsertHighWord32:
+ return ReduceFloat64InsertHighWord32(node);
case IrOpcode::kStore:
return ReduceStore(node);
+ case IrOpcode::kFloat64Equal:
+ case IrOpcode::kFloat64LessThan:
+ case IrOpcode::kFloat64LessThanOrEqual:
+ return ReduceFloat64Compare(node);
default:
break;
}
@@ -471,6 +458,44 @@
return ReplaceUint32(bit_cast<uint32_t>(m.left().Value()) +
bit_cast<uint32_t>(m.right().Value()));
}
+ if (m.left().IsInt32Sub()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.left().Is(0)) { // (0 - x) + y => y - x
+ node->ReplaceInput(0, m.right().node());
+ node->ReplaceInput(1, mleft.right().node());
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
+ Reduction const reduction = ReduceInt32Sub(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ if (m.right().IsInt32Sub()) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.left().Is(0)) { // y + (0 - x) => y - x
+ node->ReplaceInput(1, mright.right().node());
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
+ Reduction const reduction = ReduceInt32Sub(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+
+Reduction MachineOperatorReducer::ReduceInt32Sub(Node* node) {
+ DCHECK_EQ(IrOpcode::kInt32Sub, node->opcode());
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
+ if (m.IsFoldable()) { // K - K => K
+ return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) -
+ static_cast<uint32_t>(m.right().Value()));
+ }
+ if (m.LeftEqualsRight()) return ReplaceInt32(0); // x - x => 0
+ if (m.right().HasValue()) { // x - K => x + -K
+ node->ReplaceInput(1, Int32Constant(-m.right().Value()));
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
+ Reduction const reduction = ReduceInt32Add(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
return NoChange();
}
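Canonicalizing x - K into x + (-K) funnels constant subtractions through the Int32Add rules above, so patterns like (0 - x) + y keep firing. The rewrite is sound because the arithmetic is two's-complement and wraps; a standalone check (not V8 code):

  #include <stdint.h>

  // Done in uint32_t so the wrap-around is well defined in C++; the
  // identity holds for every 32-bit input, including k == 0x80000000.
  uint32_t SubViaAdd(uint32_t x, uint32_t k) {
    return x + (0u - k);  // bit-identical to x - k
  }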
@@ -489,10 +514,10 @@
return Replace(Word32Equal(Word32Equal(m.left().node(), zero), zero));
}
if (m.right().Is(-1)) { // x / -1 => 0 - x
- node->set_op(machine()->Int32Sub());
node->ReplaceInput(0, Int32Constant(0));
node->ReplaceInput(1, m.left().node());
node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
return Changed(node);
}
if (m.right().HasValue()) {
@@ -501,7 +526,7 @@
Node* quotient = dividend;
if (base::bits::IsPowerOfTwo32(Abs(divisor))) {
uint32_t const shift = WhichPowerOf2Abs(divisor);
- DCHECK_NE(0, shift);
+ DCHECK_NE(0u, shift);
if (shift > 1) {
quotient = Word32Sar(quotient, 31);
}
@@ -511,10 +536,10 @@
quotient = Int32Div(quotient, Abs(divisor));
}
if (divisor < 0) {
- node->set_op(machine()->Int32Sub());
node->ReplaceInput(0, Int32Constant(0));
node->ReplaceInput(1, quotient);
node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
return Changed(node);
}
return Replace(quotient);
@@ -540,9 +565,9 @@
Node* const dividend = m.left().node();
uint32_t const divisor = m.right().Value();
if (base::bits::IsPowerOfTwo32(divisor)) { // x / 2^n => x >> n
- node->set_op(machine()->Word32Shr());
node->ReplaceInput(1, Uint32Constant(WhichPowerOf2(m.right().Value())));
node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Word32Shr());
return Changed(node);
} else {
return Replace(Uint32Div(dividend, divisor));
@@ -569,18 +594,20 @@
if (base::bits::IsPowerOfTwo32(divisor)) {
uint32_t const mask = divisor - 1;
Node* const zero = Int32Constant(0);
- node->set_op(common()->Select(kMachInt32, BranchHint::kFalse));
node->ReplaceInput(
0, graph()->NewNode(machine()->Int32LessThan(), dividend, zero));
node->ReplaceInput(
1, Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)));
node->ReplaceInput(2, Word32And(dividend, mask));
+ NodeProperties::ChangeOp(
+ node,
+ common()->Select(MachineRepresentation::kWord32, BranchHint::kFalse));
} else {
Node* quotient = Int32Div(dividend, divisor);
- node->set_op(machine()->Int32Sub());
DCHECK_EQ(dividend, node->InputAt(0));
node->ReplaceInput(1, Int32Mul(quotient, Int32Constant(divisor)));
node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
}
return Changed(node);
}
@@ -602,15 +629,16 @@
Node* const dividend = m.left().node();
uint32_t const divisor = m.right().Value();
if (base::bits::IsPowerOfTwo32(divisor)) { // x % 2^n => x & 2^n-1
- node->set_op(machine()->Word32And());
node->ReplaceInput(1, Uint32Constant(m.right().Value() - 1));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Word32And());
} else {
Node* quotient = Uint32Div(dividend, divisor);
- node->set_op(machine()->Int32Sub());
DCHECK_EQ(dividend, node->InputAt(0));
node->ReplaceInput(1, Int32Mul(quotient, Uint32Constant(divisor)));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
}
- node->TrimInputCount(2);
return Changed(node);
}
return NoChange();
@@ -623,23 +651,24 @@
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
if (m.IsPhi()) {
Node* const phi = m.node();
- DCHECK_EQ(kRepFloat64, RepresentationOf(OpParameter<MachineType>(phi)));
+ DCHECK_EQ(MachineRepresentation::kFloat64, PhiRepresentationOf(phi->op()));
if (phi->OwnedBy(node)) {
- // TruncateFloat64ToInt32(Phi[Float64](x1,...,xn))
- // => Phi[Int32](TruncateFloat64ToInt32(x1),
+ // TruncateFloat64ToInt32[mode](Phi[Float64](x1,...,xn))
+ // => Phi[Int32](TruncateFloat64ToInt32[mode](x1),
// ...,
- // TruncateFloat64ToInt32(xn))
+ // TruncateFloat64ToInt32[mode](xn))
const int value_input_count = phi->InputCount() - 1;
for (int i = 0; i < value_input_count; ++i) {
- Node* input = graph()->NewNode(machine()->TruncateFloat64ToInt32(),
- phi->InputAt(i));
+ Node* input = graph()->NewNode(node->op(), phi->InputAt(i));
// TODO(bmeurer): Reschedule input for reduction once we have Revisit()
// instead of recursing into ReduceTruncateFloat64ToInt32() here.
Reduction reduction = ReduceTruncateFloat64ToInt32(input);
if (reduction.Changed()) input = reduction.replacement();
phi->ReplaceInput(i, input);
}
- phi->set_op(common()->Phi(kMachInt32, value_input_count));
+ NodeProperties::ChangeOp(
+ phi,
+ common()->Phi(MachineRepresentation::kWord32, value_input_count));
return Replace(phi);
}
}
@@ -648,15 +677,16 @@
Reduction MachineOperatorReducer::ReduceStore(Node* node) {
- MachineType const rep =
- RepresentationOf(StoreRepresentationOf(node->op()).machine_type());
+ MachineRepresentation const rep =
+ StoreRepresentationOf(node->op()).representation();
Node* const value = node->InputAt(2);
switch (value->opcode()) {
case IrOpcode::kWord32And: {
Uint32BinopMatcher m(value);
- if (m.right().HasValue() &&
- ((rep == kRepWord8 && (m.right().Value() & 0xff) == 0xff) ||
- (rep == kRepWord16 && (m.right().Value() & 0xffff) == 0xffff))) {
+ if (m.right().HasValue() && ((rep == MachineRepresentation::kWord8 &&
+ (m.right().Value() & 0xff) == 0xff) ||
+ (rep == MachineRepresentation::kWord16 &&
+ (m.right().Value() & 0xffff) == 0xffff))) {
node->ReplaceInput(2, m.left().node());
return Changed(node);
}
@@ -664,9 +694,10 @@
}
case IrOpcode::kWord32Sar: {
Int32BinopMatcher m(value);
- if (m.left().IsWord32Shl() &&
- ((rep == kRepWord8 && m.right().IsInRange(1, 24)) ||
- (rep == kRepWord16 && m.right().IsInRange(1, 16)))) {
+ if (m.left().IsWord32Shl() && ((rep == MachineRepresentation::kWord8 &&
+ m.right().IsInRange(1, 24)) ||
+ (rep == MachineRepresentation::kWord16 &&
+ m.right().IsInRange(1, 16)))) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(m.right().Value())) {
node->ReplaceInput(2, mleft.left().node());
@@ -752,10 +783,10 @@
if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(m.right().Value())) {
- node->set_op(machine()->Word32And());
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1,
Uint32Constant(~((1U << m.right().Value()) - 1U)));
+ NodeProperties::ChangeOp(node, machine()->Word32And());
Reduction reduction = ReduceWord32And(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -765,11 +796,50 @@
}
+Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
+ if (m.IsFoldable()) { // K >> K => K
+ return ReplaceInt32(m.left().Value() >> m.right().Value());
+ }
+ if (m.left().IsWord32Shl()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.left().IsComparison()) {
+ if (m.right().Is(31) && mleft.right().Is(31)) {
+ // Comparison << 31 >> 31 => 0 - Comparison
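+        // (A comparison yields 0 or 1; << 31 then arithmetic >> 31 turns
+        // 1 into -1 and leaves 0 alone, which is exactly 0 - Comparison.)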
+ node->ReplaceInput(0, Int32Constant(0));
+ node->ReplaceInput(1, mleft.left().node());
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
+ Reduction const reduction = ReduceInt32Sub(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ } else if (mleft.left().IsLoad()) {
+ LoadRepresentation const rep =
+ LoadRepresentationOf(mleft.left().node()->op());
+ if (m.right().Is(24) && mleft.right().Is(24) &&
+ rep == MachineType::Int8()) {
+ // Load[kMachInt8] << 24 >> 24 => Load[kMachInt8]
+ return Replace(mleft.left().node());
+ }
+ if (m.right().Is(16) && mleft.right().Is(16) &&
+ rep == MachineType::Int16()) {
+        // Load[kMachInt16] << 16 >> 16 => Load[kMachInt16]
+ return Replace(mleft.left().node());
+ }
+ }
+ }
+ return ReduceWord32Shifts(node);
+}
+
+
Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
DCHECK_EQ(IrOpcode::kWord32And, node->opcode());
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.right().node()); // x & 0 => 0
if (m.right().Is(-1)) return Replace(m.left().node()); // x & -1 => x
+ if (m.left().IsComparison() && m.right().Is(1)) { // CMP & 1 => CMP
+ return Replace(m.left().node());
+ }
if (m.IsFoldable()) { // K & K => K
return ReplaceInt32(m.left().Value() & m.right().Value());
}
@@ -784,63 +854,79 @@
return reduction.Changed() ? reduction : Changed(node);
}
}
- if (m.left().IsInt32Add() && m.right().IsNegativePowerOf2()) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue() &&
- (mleft.right().Value() & m.right().Value()) == mleft.right().Value()) {
- // (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
- node->set_op(machine()->Int32Add());
- node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
- node->ReplaceInput(1, mleft.right().node());
- Reduction const reduction = ReduceInt32Add(node);
- return reduction.Changed() ? reduction : Changed(node);
- }
- if (mleft.left().IsInt32Mul()) {
- Int32BinopMatcher mleftleft(mleft.left().node());
- if (mleftleft.right().IsMultipleOf(-m.right().Value())) {
- // (y * (K << L) + x) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
- node->set_op(machine()->Int32Add());
- node->ReplaceInput(0,
- Word32And(mleft.right().node(), m.right().node()));
- node->ReplaceInput(1, mleftleft.node());
- Reduction const reduction = ReduceInt32Add(node);
- return reduction.Changed() ? reduction : Changed(node);
+ if (m.right().IsNegativePowerOf2()) {
+ int32_t const mask = m.right().Value();
+ if (m.left().IsWord32Shl()) {
+ Uint32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue() &&
+ mleft.right().Value() >= base::bits::CountTrailingZeros32(mask)) {
+ // (x << L) & (-1 << K) => x << L iff K >= L
+ return Replace(mleft.node());
}
- }
- if (mleft.right().IsInt32Mul()) {
- Int32BinopMatcher mleftright(mleft.right().node());
- if (mleftright.right().IsMultipleOf(-m.right().Value())) {
- // (x + y * (K << L)) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
- node->set_op(machine()->Int32Add());
+ } else if (m.left().IsInt32Add()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue() &&
+ (mleft.right().Value() & mask) == mleft.right().Value()) {
+ // (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
- node->ReplaceInput(1, mleftright.node());
+ node->ReplaceInput(1, mleft.right().node());
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
Reduction const reduction = ReduceInt32Add(node);
return reduction.Changed() ? reduction : Changed(node);
}
- }
- if (mleft.left().IsWord32Shl()) {
- Int32BinopMatcher mleftleft(mleft.left().node());
- if (mleftleft.right().Is(
- base::bits::CountTrailingZeros32(m.right().Value()))) {
- // (y << L + x) & (-1 << L) => (x & (-1 << L)) + y << L
- node->set_op(machine()->Int32Add());
- node->ReplaceInput(0,
- Word32And(mleft.right().node(), m.right().node()));
- node->ReplaceInput(1, mleftleft.node());
- Reduction const reduction = ReduceInt32Add(node);
- return reduction.Changed() ? reduction : Changed(node);
+ if (mleft.left().IsInt32Mul()) {
+ Int32BinopMatcher mleftleft(mleft.left().node());
+ if (mleftleft.right().IsMultipleOf(-mask)) {
+ // (y * (K << L) + x) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
+ node->ReplaceInput(0,
+ Word32And(mleft.right().node(), m.right().node()));
+ node->ReplaceInput(1, mleftleft.node());
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
+ Reduction const reduction = ReduceInt32Add(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
}
- }
- if (mleft.right().IsWord32Shl()) {
- Int32BinopMatcher mleftright(mleft.right().node());
- if (mleftright.right().Is(
- base::bits::CountTrailingZeros32(m.right().Value()))) {
- // (x + y << L) & (-1 << L) => (x & (-1 << L)) + y << L
- node->set_op(machine()->Int32Add());
- node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
- node->ReplaceInput(1, mleftright.node());
- Reduction const reduction = ReduceInt32Add(node);
- return reduction.Changed() ? reduction : Changed(node);
+ if (mleft.right().IsInt32Mul()) {
+ Int32BinopMatcher mleftright(mleft.right().node());
+ if (mleftright.right().IsMultipleOf(-mask)) {
+ // (x + y * (K << L)) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
+ node->ReplaceInput(0,
+ Word32And(mleft.left().node(), m.right().node()));
+ node->ReplaceInput(1, mleftright.node());
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
+ Reduction const reduction = ReduceInt32Add(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ if (mleft.left().IsWord32Shl()) {
+ Int32BinopMatcher mleftleft(mleft.left().node());
+ if (mleftleft.right().Is(base::bits::CountTrailingZeros32(mask))) {
+ // (y << L + x) & (-1 << L) => (x & (-1 << L)) + y << L
+ node->ReplaceInput(0,
+ Word32And(mleft.right().node(), m.right().node()));
+ node->ReplaceInput(1, mleftleft.node());
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
+ Reduction const reduction = ReduceInt32Add(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ if (mleft.right().IsWord32Shl()) {
+ Int32BinopMatcher mleftright(mleft.right().node());
+ if (mleftright.right().Is(base::bits::CountTrailingZeros32(mask))) {
+ // (x + y << L) & (-1 << L) => (x & (-1 << L)) + y << L
+ node->ReplaceInput(0,
+ Word32And(mleft.left().node(), m.right().node()));
+ node->ReplaceInput(1, mleftright.node());
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
+ Reduction const reduction = ReduceInt32Add(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ } else if (m.left().IsInt32Mul()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsMultipleOf(-mask)) {
+ // (x * (K << L)) & (-1 << L) => x * (K << L)
+ return Replace(mleft.node());
}
}
}
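
The (x + (K << L)) & (-1 << L) rewrites above all exploit the fact that a
low-zero mask distributes over an addend whose low L bits are already zero.
A small worked check (editor's illustration, not part of the patch):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t const x = 13;
    uint32_t const k_shl_l = 2u << 3;   // K << L with K = 2, L = 3
    uint32_t const mask = 0xFFFFFFF8u;  // -1 << 3
    // (x + (K << L)) & (-1 << L) == (x & (-1 << L)) + (K << L): both 24.
    assert(((x + k_shl_l) & mask) == ((x & mask) + k_shl_l));
  }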
@@ -858,8 +944,8 @@
}
if (m.LeftEqualsRight()) return Replace(m.left().node()); // x | x => x
- Node* shl = NULL;
- Node* shr = NULL;
+ Node* shl = nullptr;
+ Node* shr = nullptr;
   // Recognize rotation; we are matching either:
   //  * x << y | x >>> (32 - y) => x ror (32 - y), i.e. x rol y
   //  * x << (32 - y) | x >>> y => x ror y
@@ -882,8 +968,8 @@
// Case where y is a constant.
if (mshl.right().Value() + mshr.right().Value() != 32) return NoChange();
} else {
- Node* sub = NULL;
- Node* y = NULL;
+ Node* sub = nullptr;
+ Node* y = nullptr;
if (mshl.right().IsInt32Sub()) {
sub = mshl.right().node();
y = mshr.right().node();
@@ -898,13 +984,96 @@
if (!msub.left().Is(32) || msub.right().node() != y) return NoChange();
}
- node->set_op(machine()->Word32Ror());
node->ReplaceInput(0, mshl.left().node());
node->ReplaceInput(1, mshr.right().node());
+ NodeProperties::ChangeOp(node, machine()->Word32Ror());
return Changed(node);
}
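
For reference, the rotation pattern being matched, checked on a constant
shift (editor's sketch; y must stay in 1..31 so that 32 - y is a valid shift
count):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t const x = 0x12345678u;
    uint32_t const y = 8;  // constant case: the two shift amounts sum to 32
    uint32_t const rol = (x << y) | (x >> (32 - y));
    assert(rol == 0x34567812u);  // x rotated left by 8, i.e. x ror (32 - y)
  }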
+Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
+ DCHECK_EQ(IrOpcode::kFloat64InsertLowWord32, node->opcode());
+ Float64Matcher mlhs(node->InputAt(0));
+ Uint32Matcher mrhs(node->InputAt(1));
+ if (mlhs.HasValue() && mrhs.HasValue()) {
+ return ReplaceFloat64(bit_cast<double>(
+ (bit_cast<uint64_t>(mlhs.Value()) & V8_UINT64_C(0xFFFFFFFF00000000)) |
+ mrhs.Value()));
+ }
+ return NoChange();
+}
+
+
+Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
+ DCHECK_EQ(IrOpcode::kFloat64InsertHighWord32, node->opcode());
+ Float64Matcher mlhs(node->InputAt(0));
+ Uint32Matcher mrhs(node->InputAt(1));
+ if (mlhs.HasValue() && mrhs.HasValue()) {
+ return ReplaceFloat64(bit_cast<double>(
+ (bit_cast<uint64_t>(mlhs.Value()) & V8_UINT64_C(0xFFFFFFFF)) |
+ (static_cast<uint64_t>(mrhs.Value()) << 32)));
+ }
+ return NoChange();
+}
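
Both insert-word foldings splice a 32-bit half into the double's bit pattern.
A portable sketch of the low-word case, using memcpy in place of V8's
bit_cast (illustrative only):

  #include <cstdint>
  #include <cstring>

  double InsertLowWord32(double d, uint32_t lo) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    bits = (bits & 0xFFFFFFFF00000000ull) | lo;  // keep high word, swap low
    std::memcpy(&d, &bits, sizeof d);
    // e.g. InsertLowWord32(1.0, 0xDEADBEEF) has bits 0x3FF00000DEADBEEF.
    return d;
  }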
+
+
+namespace {
+
+bool IsFloat64RepresentableAsFloat32(const Float64Matcher& m) {
+ if (m.HasValue()) {
+ double v = m.Value();
+ float fv = static_cast<float>(v);
+ return static_cast<double>(fv) == v;
+ }
+ return false;
+}
+
+} // namespace
+
+
+Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
+ DCHECK((IrOpcode::kFloat64Equal == node->opcode()) ||
+ (IrOpcode::kFloat64LessThan == node->opcode()) ||
+ (IrOpcode::kFloat64LessThanOrEqual == node->opcode()));
+ // As all Float32 values have an exact representation in Float64, comparing
+ // two Float64 values both converted from Float32 is equivalent to comparing
+ // the original Float32s, so we can ignore the conversions. We can also reduce
+ // comparisons of converted Float64 values against constants that can be
+ // represented exactly as Float32.
+ Float64BinopMatcher m(node);
+ if ((m.left().IsChangeFloat32ToFloat64() &&
+ m.right().IsChangeFloat32ToFloat64()) ||
+ (m.left().IsChangeFloat32ToFloat64() &&
+ IsFloat64RepresentableAsFloat32(m.right())) ||
+ (IsFloat64RepresentableAsFloat32(m.left()) &&
+ m.right().IsChangeFloat32ToFloat64())) {
+ switch (node->opcode()) {
+ case IrOpcode::kFloat64Equal:
+ NodeProperties::ChangeOp(node, machine()->Float32Equal());
+ break;
+ case IrOpcode::kFloat64LessThan:
+ NodeProperties::ChangeOp(node, machine()->Float32LessThan());
+ break;
+ case IrOpcode::kFloat64LessThanOrEqual:
+ NodeProperties::ChangeOp(node, machine()->Float32LessThanOrEqual());
+ break;
+ default:
+ return NoChange();
+ }
+ node->ReplaceInput(
+ 0, m.left().HasValue()
+ ? Float32Constant(static_cast<float>(m.left().Value()))
+ : m.left().InputAt(0));
+ node->ReplaceInput(
+ 1, m.right().HasValue()
+ ? Float32Constant(static_cast<float>(m.right().Value()))
+ : m.right().InputAt(0));
+ return Changed(node);
+ }
+ return NoChange();
+}
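
The representability guard matters: narrowing is only sound when the constant
survives the round trip through Float32. An editor's check of both directions
(not part of the patch):

  #include <cassert>

  int main() {
    float const f = 0.3f;
    double const d = static_cast<double>(f);  // ChangeFloat32ToFloat64
    // 1.5 is exactly representable as float, so the compare can be narrowed:
    assert((d < 1.5) == (f < 1.5f));
    // 0.1 is not, so a compare against 0.1 must stay in double:
    assert(static_cast<double>(static_cast<float>(0.1)) != 0.1);
  }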
+
+
CommonOperatorBuilder* MachineOperatorReducer::common() const {
return jsgraph()->common();
}
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index 8200abb..7f8ff1a 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -19,12 +19,12 @@
// Performs constant folding and strength reduction on nodes that have
// machine operators.
-class MachineOperatorReducer FINAL : public Reducer {
+class MachineOperatorReducer final : public Reducer {
public:
explicit MachineOperatorReducer(JSGraph* jsgraph);
~MachineOperatorReducer();
- Reduction Reduce(Node* node) OVERRIDE;
+ Reduction Reduce(Node* node) override;
private:
Node* Float32Constant(volatile float value);
@@ -65,6 +65,7 @@
}
Reduction ReduceInt32Add(Node* node);
+ Reduction ReduceInt32Sub(Node* node);
Reduction ReduceInt32Div(Node* node);
Reduction ReduceUint32Div(Node* node);
Reduction ReduceInt32Mod(Node* node);
@@ -74,8 +75,12 @@
Reduction ReduceProjection(size_t index, Node* node);
Reduction ReduceWord32Shifts(Node* node);
Reduction ReduceWord32Shl(Node* node);
+ Reduction ReduceWord32Sar(Node* node);
Reduction ReduceWord32And(Node* node);
Reduction ReduceWord32Or(Node* node);
+ Reduction ReduceFloat64InsertLowWord32(Node* node);
+ Reduction ReduceFloat64InsertHighWord32(Node* node);
+ Reduction ReduceFloat64Compare(Node* node);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index eb034e9..511a10d 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -7,17 +7,37 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/v8.h"
-#include "src/zone-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
+std::ostream& operator<<(std::ostream& os, TruncationMode mode) {
+ switch (mode) {
+ case TruncationMode::kJavaScript:
+ return os << "JavaScript";
+ case TruncationMode::kRoundToZero:
+ return os << "RoundToZero";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+TruncationMode TruncationModeOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, op->opcode());
+ return OpParameter<TruncationMode>(op);
+}
+
+
std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
switch (kind) {
case kNoWriteBarrier:
return os << "NoWriteBarrier";
+ case kMapWriteBarrier:
+ return os << "MapWriteBarrier";
+ case kPointerWriteBarrier:
+ return os << "PointerWriteBarrier";
case kFullWriteBarrier:
return os << "FullWriteBarrier";
}
@@ -27,7 +47,7 @@
bool operator==(StoreRepresentation lhs, StoreRepresentation rhs) {
- return lhs.machine_type() == rhs.machine_type() &&
+ return lhs.representation() == rhs.representation() &&
lhs.write_barrier_kind() == rhs.write_barrier_kind();
}
@@ -38,16 +58,22 @@
size_t hash_value(StoreRepresentation rep) {
- return base::hash_combine(rep.machine_type(), rep.write_barrier_kind());
+ return base::hash_combine(rep.representation(), rep.write_barrier_kind());
}
std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
- return os << "(" << rep.machine_type() << " : " << rep.write_barrier_kind()
+ return os << "(" << rep.representation() << " : " << rep.write_barrier_kind()
<< ")";
}
+LoadRepresentation LoadRepresentationOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kLoad, op->opcode());
+ return OpParameter<LoadRepresentation>(op);
+}
+
+
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kStore, op->opcode());
return OpParameter<StoreRepresentation>(op);
@@ -75,6 +101,7 @@
V(Word32Sar, Operator::kNoProperties, 2, 0, 1) \
V(Word32Ror, Operator::kNoProperties, 2, 0, 1) \
V(Word32Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -82,6 +109,7 @@
V(Word64Shr, Operator::kNoProperties, 2, 0, 1) \
V(Word64Sar, Operator::kNoProperties, 2, 0, 1) \
V(Word64Ror, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Equal, Operator::kCommutative, 2, 0, 1) \
V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
@@ -100,67 +128,115 @@
V(Uint32Mod, Operator::kNoProperties, 2, 1, 1) \
V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int64AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
+ 0, 2) \
V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int64SubWithOverflow, Operator::kNoProperties, 2, 0, 2) \
V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int64Div, Operator::kNoProperties, 2, 0, 1) \
- V(Int64Mod, Operator::kNoProperties, 2, 0, 1) \
+ V(Int64Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Int64Mod, Operator::kNoProperties, 2, 1, 1) \
V(Int64LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint64Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint64Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Uint64Mod, Operator::kNoProperties, 2, 1, 1) \
V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint64Mod, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Add, Operator::kCommutative, 2, 0, 1) \
+ V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float64Add, Operator::kCommutative, 2, 0, 1) \
V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Ceil, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Floor, Operator::kNoProperties, 1, 0, 1) \
- V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
- V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Float32LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Float64Equal, Operator::kCommutative, 2, 0, 1) \
V(Float64LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)
+ V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
+ V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)
+
+#define PURE_OPTIONAL_OP_LIST(V) \
+ V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64Ctz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word32Popcnt, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64Popcnt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Max, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32Min, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Max, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Min, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32RoundDown, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundDown, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32RoundUp, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundUp, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
#define MACHINE_TYPE_LIST(V) \
- V(MachFloat32) \
- V(MachFloat64) \
- V(MachInt8) \
- V(MachUint8) \
- V(MachInt16) \
- V(MachUint16) \
- V(MachInt32) \
- V(MachUint32) \
- V(MachInt64) \
- V(MachUint64) \
- V(MachAnyTagged) \
- V(RepBit) \
- V(RepWord8) \
- V(RepWord16) \
- V(RepWord32) \
- V(RepWord64) \
- V(RepFloat32) \
- V(RepFloat64) \
- V(RepTagged)
+ V(Float32) \
+ V(Float64) \
+ V(Int8) \
+ V(Uint8) \
+ V(Int16) \
+ V(Uint16) \
+ V(Int32) \
+ V(Uint32) \
+ V(Int64) \
+ V(Uint64) \
+ V(Pointer) \
+ V(AnyTagged)
+
+
+#define MACHINE_REPRESENTATION_LIST(V) \
+ V(kFloat32) \
+ V(kFloat64) \
+ V(kWord8) \
+ V(kWord16) \
+ V(kWord32) \
+ V(kWord64) \
+ V(kTagged)
struct MachineOperatorGlobalCache {
#define PURE(Name, properties, value_input_count, control_input_count, \
output_count) \
- struct Name##Operator FINAL : public Operator { \
+ struct Name##Operator final : public Operator { \
Name##Operator() \
: Operator(IrOpcode::k##Name, Operator::kPure | properties, #Name, \
value_input_count, 0, control_input_count, output_count, 0, \
@@ -168,21 +244,35 @@
}; \
Name##Operator k##Name;
PURE_OP_LIST(PURE)
+ PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
+ template <TruncationMode kMode>
+ struct TruncateFloat64ToInt32Operator final
+ : public Operator1<TruncationMode> {
+ TruncateFloat64ToInt32Operator()
+ : Operator1<TruncationMode>(IrOpcode::kTruncateFloat64ToInt32,
+ Operator::kPure, "TruncateFloat64ToInt32",
+ 1, 0, 0, 1, 0, 0, kMode) {}
+ };
+ TruncateFloat64ToInt32Operator<TruncationMode::kJavaScript>
+ kTruncateFloat64ToInt32JavaScript;
+ TruncateFloat64ToInt32Operator<TruncationMode::kRoundToZero>
+ kTruncateFloat64ToInt32RoundToZero;
+
#define LOAD(Type) \
- struct Load##Type##Operator FINAL : public Operator1<LoadRepresentation> { \
+ struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
Load##Type##Operator() \
: Operator1<LoadRepresentation>( \
IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, \
- "Load", 2, 1, 1, 1, 1, 0, k##Type) {} \
+ "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
- struct CheckedLoad##Type##Operator FINAL \
+ struct CheckedLoad##Type##Operator final \
: public Operator1<CheckedLoadRepresentation> { \
CheckedLoad##Type##Operator() \
: Operator1<CheckedLoadRepresentation>( \
IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite, \
- "CheckedLoad", 3, 1, 1, 1, 1, 0, k##Type) {} \
+ "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
Load##Type##Operator kLoad##Type; \
CheckedLoad##Type##Operator kCheckedLoad##Type;
@@ -195,29 +285,44 @@
: Operator1<StoreRepresentation>( \
IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, \
"Store", 3, 1, 1, 0, 1, 0, \
- StoreRepresentation(k##Type, write_barrier_kind)) {} \
+ StoreRepresentation(MachineRepresentation::Type, \
+ write_barrier_kind)) {} \
}; \
- struct Store##Type##NoWriteBarrier##Operator FINAL \
+ struct Store##Type##NoWriteBarrier##Operator final \
: public Store##Type##Operator { \
Store##Type##NoWriteBarrier##Operator() \
: Store##Type##Operator(kNoWriteBarrier) {} \
}; \
- struct Store##Type##FullWriteBarrier##Operator FINAL \
+ struct Store##Type##MapWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##MapWriteBarrier##Operator() \
+ : Store##Type##Operator(kMapWriteBarrier) {} \
+ }; \
+ struct Store##Type##PointerWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##PointerWriteBarrier##Operator() \
+ : Store##Type##Operator(kPointerWriteBarrier) {} \
+ }; \
+ struct Store##Type##FullWriteBarrier##Operator final \
: public Store##Type##Operator { \
Store##Type##FullWriteBarrier##Operator() \
: Store##Type##Operator(kFullWriteBarrier) {} \
}; \
- struct CheckedStore##Type##Operator FINAL \
+ struct CheckedStore##Type##Operator final \
: public Operator1<CheckedStoreRepresentation> { \
CheckedStore##Type##Operator() \
: Operator1<CheckedStoreRepresentation>( \
IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow, \
- "CheckedStore", 4, 1, 1, 0, 1, 0, k##Type) {} \
+ "CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
+ } \
}; \
Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
+ Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier; \
+ Store##Type##PointerWriteBarrier##Operator \
+ kStore##Type##PointerWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
CheckedStore##Type##Operator kCheckedStore##Type;
- MACHINE_TYPE_LIST(STORE)
+ MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
};
@@ -226,10 +331,12 @@
LAZY_INSTANCE_INITIALIZER;
-MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone, MachineType word,
+MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone,
+ MachineRepresentation word,
Flags flags)
- : zone_(zone), cache_(kCache.Get()), word_(word), flags_(flags) {
- DCHECK(word == kRepWord32 || word == kRepWord64);
+ : cache_(kCache.Get()), word_(word), flags_(flags) {
+ DCHECK(word == MachineRepresentation::kWord32 ||
+ word == MachineRepresentation::kWord64);
}
@@ -239,81 +346,93 @@
PURE_OP_LIST(PURE)
#undef PURE
-
-const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
- switch (rep) {
-#define LOAD(Type) \
- case k##Type: \
- return &cache_.kLoad##Type;
- MACHINE_TYPE_LIST(LOAD)
-#undef LOAD
- default:
- break;
+#define PURE(Name, properties, value_input_count, control_input_count, \
+ output_count) \
+ const OptionalOperator MachineOperatorBuilder::Name() { \
+ return OptionalOperator(flags_ & k##Name ? &cache_.k##Name : nullptr); \
}
- // Uncached.
- return new (zone_) Operator1<LoadRepresentation>( // --
- IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, "Load", 2, 1, 1,
- 1, 1, 0, rep);
+PURE_OPTIONAL_OP_LIST(PURE)
+#undef PURE
+
+
+const Operator* MachineOperatorBuilder::TruncateFloat64ToInt32(
+ TruncationMode mode) {
+ switch (mode) {
+ case TruncationMode::kJavaScript:
+ return &cache_.kTruncateFloat64ToInt32JavaScript;
+ case TruncationMode::kRoundToZero:
+ return &cache_.kTruncateFloat64ToInt32RoundToZero;
+ }
+ UNREACHABLE();
+ return nullptr;
}
-const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
- switch (rep.machine_type()) {
-#define STORE(Type) \
- case k##Type: \
- switch (rep.write_barrier_kind()) { \
- case kNoWriteBarrier: \
- return &cache_.k##Store##Type##NoWriteBarrier; \
- case kFullWriteBarrier: \
- return &cache_.k##Store##Type##FullWriteBarrier; \
- } \
- break;
- MACHINE_TYPE_LIST(STORE)
-#undef STORE
+const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kLoad##Type; \
+ }
+ MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+ UNREACHABLE();
+ return nullptr;
+}
- default:
+
+const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
+ switch (store_rep.representation()) {
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ switch (store_rep.write_barrier_kind()) { \
+ case kNoWriteBarrier: \
+ return &cache_.k##Store##kRep##NoWriteBarrier; \
+ case kMapWriteBarrier: \
+ return &cache_.k##Store##kRep##MapWriteBarrier; \
+ case kPointerWriteBarrier: \
+ return &cache_.k##Store##kRep##PointerWriteBarrier; \
+ case kFullWriteBarrier: \
+ return &cache_.k##Store##kRep##FullWriteBarrier; \
+ } \
+ break;
+ MACHINE_REPRESENTATION_LIST(STORE)
+#undef STORE
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kNone:
break;
}
- // Uncached.
- return new (zone_) Operator1<StoreRepresentation>( // --
- IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, "Store", 3, 1,
- 1, 0, 1, 0, rep);
+ UNREACHABLE();
+ return nullptr;
}
const Operator* MachineOperatorBuilder::CheckedLoad(
CheckedLoadRepresentation rep) {
- switch (rep) {
-#define LOAD(Type) \
- case k##Type: \
- return &cache_.kCheckedLoad##Type;
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kCheckedLoad##Type; \
+ }
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
- default:
- break;
- }
- // Uncached.
- return new (zone_) Operator1<CheckedLoadRepresentation>(
- IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite,
- "CheckedLoad", 3, 1, 1, 1, 1, 0, rep);
+ UNREACHABLE();
+ return nullptr;
}
const Operator* MachineOperatorBuilder::CheckedStore(
CheckedStoreRepresentation rep) {
switch (rep) {
-#define STORE(Type) \
- case k##Type: \
- return &cache_.kCheckedStore##Type;
- MACHINE_TYPE_LIST(STORE)
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ return &cache_.kCheckedStore##kRep;
+ MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kNone:
break;
}
- // Uncached.
- return new (zone_) Operator1<CheckedStoreRepresentation>(
- IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow,
- "CheckedStore", 4, 1, 1, 0, 1, 0, rep);
+ UNREACHABLE();
+ return nullptr;
}
} // namespace compiler
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 42f3130..00fefe3 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_MACHINE_OPERATOR_H_
#include "src/base/flags.h"
-#include "src/compiler/machine-type.h"
+#include "src/machine-type.h"
namespace v8 {
namespace internal {
@@ -17,8 +17,44 @@
class Operator;
+// For operators that are not supported on all platforms.
+class OptionalOperator final {
+ public:
+ explicit OptionalOperator(const Operator* op) : op_(op) {}
+
+ bool IsSupported() const { return op_ != nullptr; }
+ const Operator* op() const {
+ DCHECK_NOT_NULL(op_);
+ return op_;
+ }
+
+ private:
+ const Operator* const op_;
+};
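
A sketch of the intended call pattern (illustrative; `machine()`, `graph()`,
`node` and `input` belong to an assumed lowering context, not to this patch):

  // Probe before use; op() DCHECKs against null, so an unsupported operator
  // forces the caller onto an explicit fallback path.
  if (machine()->Word32Ctz().IsSupported()) {
    node = graph()->NewNode(machine()->Word32Ctz().op(), input);
  } else {
    // e.g. lower via the identity ctz(x) == 32 - clz(~x & (x - 1)).
  }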
+
+
+// Supported float64 to int32 truncation modes.
+enum class TruncationMode : uint8_t {
+ kJavaScript, // ES6 section 7.1.5
+  kRoundToZero   // Round towards zero. Implementation-defined behavior for
+                 // NaN and overflow.
+};
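
A standalone sketch of the difference between the two modes (editor's
illustration; the JavaScript mode follows ES6 ToInt32, wrapping modulo 2^32):

  #include <cassert>
  #include <cmath>
  #include <cstdint>

  int32_t TruncateJavaScript(double d) {
    if (!std::isfinite(d)) return 0;                    // NaN, +/-inf -> 0
    double m = std::fmod(std::trunc(d), 4294967296.0);  // wrap mod 2^32
    if (m < 0) m += 4294967296.0;
    return static_cast<int32_t>(static_cast<uint32_t>(m));
  }

  int main() {
    assert(TruncateJavaScript(4294967301.5) == 5);   // 2^32 + 5.5 wraps to 5
    assert(TruncateJavaScript(std::nan("")) == 0);   // NaN -> 0
    // kRoundToZero leaves the same inputs implementation-defined: a plain
    // cast here is UB, and x86 cvttsd2si would yield INT_MIN.
  }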
+
+V8_INLINE size_t hash_value(TruncationMode mode) {
+ return static_cast<uint8_t>(mode);
+}
+
+std::ostream& operator<<(std::ostream&, TruncationMode);
+
+TruncationMode TruncationModeOf(Operator const*);
+
+
// Supported write barrier modes.
-enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };
+enum WriteBarrierKind {
+ kNoWriteBarrier,
+ kMapWriteBarrier,
+ kPointerWriteBarrier,
+ kFullWriteBarrier
+};
std::ostream& operator<<(std::ostream& os, WriteBarrierKind);
@@ -26,20 +62,22 @@
// A Load needs a MachineType.
typedef MachineType LoadRepresentation;
+LoadRepresentation LoadRepresentationOf(Operator const*);
// A Store needs a MachineType and a WriteBarrierKind in order to emit the
// correct write barrier.
-class StoreRepresentation FINAL {
+class StoreRepresentation final {
public:
- StoreRepresentation(MachineType machine_type,
+ StoreRepresentation(MachineRepresentation representation,
WriteBarrierKind write_barrier_kind)
- : machine_type_(machine_type), write_barrier_kind_(write_barrier_kind) {}
+ : representation_(representation),
+ write_barrier_kind_(write_barrier_kind) {}
- MachineType machine_type() const { return machine_type_; }
+ MachineRepresentation representation() const { return representation_; }
WriteBarrierKind write_barrier_kind() const { return write_barrier_kind_; }
private:
- MachineType machine_type_;
+ MachineRepresentation representation_;
WriteBarrierKind write_barrier_kind_;
};
@@ -60,7 +98,7 @@
// A CheckedStore needs a MachineType.
-typedef MachineType CheckedStoreRepresentation;
+typedef MachineRepresentation CheckedStoreRepresentation;
CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
@@ -68,24 +106,47 @@
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
-class MachineOperatorBuilder FINAL : public ZoneObject {
+class MachineOperatorBuilder final : public ZoneObject {
public:
// Flags that specify which operations are available. This is useful
// for operations that are unsupported by some back-ends.
enum Flag {
kNoFlags = 0u,
- kFloat64Floor = 1u << 0,
- kFloat64Ceil = 1u << 1,
- kFloat64RoundTruncate = 1u << 2,
- kFloat64RoundTiesAway = 1u << 3,
- kInt32DivIsSafe = 1u << 4,
- kUint32DivIsSafe = 1u << 5,
- kWord32ShiftIsSafe = 1u << 6
+    // Note that Float*Max behaves like `(b < a) ? a : b`, not like Math.max().
+    // Note that Float*Min behaves like `(a < b) ? a : b`, not like Math.min().
+    // (The NaN consequences are sketched just after this enum.)
+ kFloat32Max = 1u << 0,
+ kFloat32Min = 1u << 1,
+ kFloat64Max = 1u << 2,
+ kFloat64Min = 1u << 3,
+ kFloat32RoundDown = 1u << 4,
+ kFloat64RoundDown = 1u << 5,
+ kFloat32RoundUp = 1u << 6,
+ kFloat64RoundUp = 1u << 7,
+ kFloat32RoundTruncate = 1u << 8,
+ kFloat64RoundTruncate = 1u << 9,
+ kFloat32RoundTiesEven = 1u << 10,
+ kFloat64RoundTiesEven = 1u << 11,
+ kFloat64RoundTiesAway = 1u << 12,
+ kInt32DivIsSafe = 1u << 13,
+ kUint32DivIsSafe = 1u << 14,
+ kWord32ShiftIsSafe = 1u << 15,
+ kWord32Ctz = 1u << 16,
+ kWord64Ctz = 1u << 17,
+ kWord32Popcnt = 1u << 18,
+ kWord64Popcnt = 1u << 19,
+ kAllOptionalOps = kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
+ kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
+ kFloat64RoundUp | kFloat32RoundTruncate |
+ kFloat64RoundTruncate | kFloat64RoundTiesAway |
+ kFloat32RoundTiesEven | kFloat64RoundTiesEven |
+ kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt
};
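
The sketch promised in the note above: the `(b < a) ? a : b` form drops a NaN
in the first operand, unlike Math.max (editor's illustration, not part of the
patch):

  #include <cassert>
  #include <cmath>

  double Float64MaxLike(double a, double b) { return (b < a) ? a : b; }

  int main() {
    // (1.0 < NaN) is false, so the NaN is silently dropped:
    assert(Float64MaxLike(std::nan(""), 1.0) == 1.0);
    // JavaScript's Math.max(NaN, 1.0) is NaN instead.
  }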
typedef base::Flags<Flag, unsigned> Flags;
- explicit MachineOperatorBuilder(Zone* zone, MachineType word = kMachPtr,
- Flags supportedOperators = kNoFlags);
+ explicit MachineOperatorBuilder(
+ Zone* zone,
+ MachineRepresentation word = MachineType::PointerRepresentation(),
+ Flags supportedOperators = kNoFlags);
const Operator* Word32And();
const Operator* Word32Or();
@@ -95,6 +156,10 @@
const Operator* Word32Sar();
const Operator* Word32Ror();
const Operator* Word32Equal();
+ const Operator* Word32Clz();
+ const OptionalOperator Word32Ctz();
+ const OptionalOperator Word32Popcnt();
+ const OptionalOperator Word64Popcnt();
bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
const Operator* Word64And();
@@ -104,6 +169,8 @@
const Operator* Word64Shr();
const Operator* Word64Sar();
const Operator* Word64Ror();
+ const Operator* Word64Clz();
+ const OptionalOperator Word64Ctz();
const Operator* Word64Equal();
const Operator* Int32Add();
@@ -125,7 +192,9 @@
bool Uint32DivIsSafe() const { return flags_ & kUint32DivIsSafe; }
const Operator* Int64Add();
+ const Operator* Int64AddWithOverflow();
const Operator* Int64Sub();
+ const Operator* Int64SubWithOverflow();
const Operator* Int64Mul();
const Operator* Int64Div();
const Operator* Int64Mod();
@@ -133,6 +202,7 @@
const Operator* Int64LessThanOrEqual();
const Operator* Uint64Div();
const Operator* Uint64LessThan();
+ const Operator* Uint64LessThanOrEqual();
const Operator* Uint64Mod();
// These operators change the representation of numbers while preserving the
@@ -143,18 +213,42 @@
const Operator* ChangeFloat32ToFloat64();
const Operator* ChangeFloat64ToInt32(); // narrowing
const Operator* ChangeFloat64ToUint32(); // narrowing
+ const Operator* TryTruncateFloat32ToInt64();
+ const Operator* TryTruncateFloat64ToInt64();
+ const Operator* TryTruncateFloat32ToUint64();
+ const Operator* TryTruncateFloat64ToUint64();
const Operator* ChangeInt32ToFloat64();
const Operator* ChangeInt32ToInt64();
const Operator* ChangeUint32ToFloat64();
const Operator* ChangeUint32ToUint64();
- // These operators truncate numbers, both changing the representation of
- // the number and mapping multiple input values onto the same output value.
+ // These operators truncate or round numbers, both changing the representation
+ // of the number and mapping multiple input values onto the same output value.
const Operator* TruncateFloat64ToFloat32();
- const Operator* TruncateFloat64ToInt32(); // JavaScript semantics.
+ const Operator* TruncateFloat64ToInt32(TruncationMode);
const Operator* TruncateInt64ToInt32();
+ const Operator* RoundInt64ToFloat32();
+ const Operator* RoundInt64ToFloat64();
+ const Operator* RoundUint64ToFloat32();
+ const Operator* RoundUint64ToFloat64();
- // Floating point operators always operate with IEEE 754 round-to-nearest.
+ // These operators reinterpret the bits of a floating point number as an
+ // integer and vice versa.
+ const Operator* BitcastFloat32ToInt32();
+ const Operator* BitcastFloat64ToInt64();
+ const Operator* BitcastInt32ToFloat32();
+ const Operator* BitcastInt64ToFloat64();
+
+ // Floating point operators always operate with IEEE 754 round-to-nearest
+ // (single-precision).
+ const Operator* Float32Add();
+ const Operator* Float32Sub();
+ const Operator* Float32Mul();
+ const Operator* Float32Div();
+ const Operator* Float32Sqrt();
+
+ // Floating point operators always operate with IEEE 754 round-to-nearest
+ // (double-precision).
const Operator* Float64Add();
const Operator* Float64Sub();
const Operator* Float64Mul();
@@ -162,20 +256,46 @@
const Operator* Float64Mod();
const Operator* Float64Sqrt();
- // Floating point comparisons complying to IEEE 754.
+ // Floating point comparisons complying to IEEE 754 (single-precision).
+ const Operator* Float32Equal();
+ const Operator* Float32LessThan();
+ const Operator* Float32LessThanOrEqual();
+
+ // Floating point comparisons complying to IEEE 754 (double-precision).
const Operator* Float64Equal();
const Operator* Float64LessThan();
const Operator* Float64LessThanOrEqual();
+ // Floating point min/max complying to IEEE 754 (single-precision).
+ const OptionalOperator Float32Max();
+ const OptionalOperator Float32Min();
+
+ // Floating point min/max complying to IEEE 754 (double-precision).
+ const OptionalOperator Float64Max();
+ const OptionalOperator Float64Min();
+
+ // Floating point abs complying to IEEE 754 (single-precision).
+ const Operator* Float32Abs();
+
+ // Floating point abs complying to IEEE 754 (double-precision).
+ const Operator* Float64Abs();
+
// Floating point rounding.
- const Operator* Float64Floor();
- const Operator* Float64Ceil();
- const Operator* Float64RoundTruncate();
- const Operator* Float64RoundTiesAway();
- bool HasFloat64Floor() { return flags_ & kFloat64Floor; }
- bool HasFloat64Ceil() { return flags_ & kFloat64Ceil; }
- bool HasFloat64RoundTruncate() { return flags_ & kFloat64RoundTruncate; }
- bool HasFloat64RoundTiesAway() { return flags_ & kFloat64RoundTiesAway; }
+ const OptionalOperator Float32RoundDown();
+ const OptionalOperator Float64RoundDown();
+ const OptionalOperator Float32RoundUp();
+ const OptionalOperator Float64RoundUp();
+ const OptionalOperator Float32RoundTruncate();
+ const OptionalOperator Float64RoundTruncate();
+ const OptionalOperator Float64RoundTiesAway();
+ const OptionalOperator Float32RoundTiesEven();
+ const OptionalOperator Float64RoundTiesEven();
+
+ // Floating point bit representation.
+ const Operator* Float64ExtractLowWord32();
+ const Operator* Float64ExtractHighWord32();
+ const Operator* Float64InsertLowWord32();
+ const Operator* Float64InsertHighWord32();
// load [base + index]
const Operator* Load(LoadRepresentation rep);
@@ -185,6 +305,7 @@
// Access to the machine stack.
const Operator* LoadStackPointer();
+ const Operator* LoadFramePointer();
// checked-load heap, index, length
const Operator* CheckedLoad(CheckedLoadRepresentation);
@@ -192,9 +313,9 @@
const Operator* CheckedStore(CheckedStoreRepresentation);
// Target machine word-size assumed by this builder.
- bool Is32() const { return word() == kRepWord32; }
- bool Is64() const { return word() == kRepWord64; }
- MachineType word() const { return word_; }
+ bool Is32() const { return word() == MachineRepresentation::kWord32; }
+ bool Is64() const { return word() == MachineRepresentation::kWord64; }
+ MachineRepresentation word() const { return word_; }
// Pseudo operators that translate to 32/64-bit operators depending on the
// word-size of the target machine assumed by this builder.
@@ -226,10 +347,9 @@
#undef PSEUDO_OP_LIST
private:
- Zone* zone_;
- const MachineOperatorGlobalCache& cache_;
- const MachineType word_;
- const Flags flags_;
+ MachineOperatorGlobalCache const& cache_;
+ MachineRepresentation const word_;
+ Flags const flags_;
DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
};
diff --git a/src/compiler/machine-type.cc b/src/compiler/machine-type.cc
deleted file mode 100644
index 7475a03..0000000
--- a/src/compiler/machine-type.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/machine-type.h"
-#include "src/ostreams.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#define PRINT(bit) \
- if (type & bit) { \
- if (before) os << "|"; \
- os << #bit; \
- before = true; \
- }
-
-
-std::ostream& operator<<(std::ostream& os, const MachineType& type) {
- bool before = false;
- PRINT(kRepBit);
- PRINT(kRepWord8);
- PRINT(kRepWord16);
- PRINT(kRepWord32);
- PRINT(kRepWord64);
- PRINT(kRepFloat32);
- PRINT(kRepFloat64);
- PRINT(kRepTagged);
-
- PRINT(kTypeBool);
- PRINT(kTypeInt32);
- PRINT(kTypeUint32);
- PRINT(kTypeInt64);
- PRINT(kTypeUint64);
- PRINT(kTypeNumber);
- PRINT(kTypeAny);
- return os;
-}
-
-
-#undef PRINT
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/src/compiler/machine-type.h b/src/compiler/machine-type.h
deleted file mode 100644
index 4c51a9f..0000000
--- a/src/compiler/machine-type.h
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_MACHINE_TYPE_H_
-#define V8_COMPILER_MACHINE_TYPE_H_
-
-#include <iosfwd>
-
-#include "src/base/bits.h"
-#include "src/globals.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Machine-level types and representations.
-// TODO(titzer): Use the real type system instead of MachineType.
-enum MachineType {
- // Representations.
- kRepBit = 1 << 0,
- kRepWord8 = 1 << 1,
- kRepWord16 = 1 << 2,
- kRepWord32 = 1 << 3,
- kRepWord64 = 1 << 4,
- kRepFloat32 = 1 << 5,
- kRepFloat64 = 1 << 6,
- kRepTagged = 1 << 7,
-
- // Types.
- kTypeBool = 1 << 8,
- kTypeInt32 = 1 << 9,
- kTypeUint32 = 1 << 10,
- kTypeInt64 = 1 << 11,
- kTypeUint64 = 1 << 12,
- kTypeNumber = 1 << 13,
- kTypeAny = 1 << 14,
-
- // Machine types.
- kMachNone = 0,
- kMachBool = kRepBit | kTypeBool,
- kMachFloat32 = kRepFloat32 | kTypeNumber,
- kMachFloat64 = kRepFloat64 | kTypeNumber,
- kMachInt8 = kRepWord8 | kTypeInt32,
- kMachUint8 = kRepWord8 | kTypeUint32,
- kMachInt16 = kRepWord16 | kTypeInt32,
- kMachUint16 = kRepWord16 | kTypeUint32,
- kMachInt32 = kRepWord32 | kTypeInt32,
- kMachUint32 = kRepWord32 | kTypeUint32,
- kMachInt64 = kRepWord64 | kTypeInt64,
- kMachUint64 = kRepWord64 | kTypeUint64,
- kMachIntPtr = (kPointerSize == 4) ? kMachInt32 : kMachInt64,
- kMachUintPtr = (kPointerSize == 4) ? kMachUint32 : kMachUint64,
- kMachPtr = (kPointerSize == 4) ? kRepWord32 : kRepWord64,
- kMachAnyTagged = kRepTagged | kTypeAny
-};
-
-std::ostream& operator<<(std::ostream& os, const MachineType& type);
-
-typedef uint16_t MachineTypeUnion;
-
-// Globally useful machine types and constants.
-const MachineTypeUnion kRepMask = kRepBit | kRepWord8 | kRepWord16 |
- kRepWord32 | kRepWord64 | kRepFloat32 |
- kRepFloat64 | kRepTagged;
-const MachineTypeUnion kTypeMask = kTypeBool | kTypeInt32 | kTypeUint32 |
- kTypeInt64 | kTypeUint64 | kTypeNumber |
- kTypeAny;
-
-// Gets only the type of the given type.
-inline MachineType TypeOf(MachineType machine_type) {
- int result = machine_type & kTypeMask;
- return static_cast<MachineType>(result);
-}
-
-// Gets only the representation of the given type.
-inline MachineType RepresentationOf(MachineType machine_type) {
- int result = machine_type & kRepMask;
- CHECK(base::bits::IsPowerOfTwo32(result));
- return static_cast<MachineType>(result);
-}
-
-// Gets the log2 of the element size in bytes of the machine type.
-inline int ElementSizeLog2Of(MachineType machine_type) {
- switch (RepresentationOf(machine_type)) {
- case kRepBit:
- case kRepWord8:
- return 0;
- case kRepWord16:
- return 1;
- case kRepWord32:
- case kRepFloat32:
- return 2;
- case kRepWord64:
- case kRepFloat64:
- return 3;
- case kRepTagged:
- return kPointerSizeLog2;
- default:
- break;
- }
- UNREACHABLE();
- return -1;
-}
-
-// Gets the element size in bytes of the machine type.
-inline int ElementSizeOf(MachineType machine_type) {
- const int shift = ElementSizeLog2Of(machine_type);
- DCHECK_NE(-1, shift);
- return 1 << shift;
-}
-
-// Describes the inputs and outputs of a function or call.
-template <typename T>
-class Signature : public ZoneObject {
- public:
- Signature(size_t return_count, size_t parameter_count, T* reps)
- : return_count_(return_count),
- parameter_count_(parameter_count),
- reps_(reps) {}
-
- size_t return_count() const { return return_count_; }
- size_t parameter_count() const { return parameter_count_; }
-
- T GetParam(size_t index) const {
- DCHECK(index < parameter_count_);
- return reps_[return_count_ + index];
- }
-
- T GetReturn(size_t index = 0) const {
- DCHECK(index < return_count_);
- return reps_[index];
- }
-
- // For incrementally building signatures.
- class Builder {
- public:
- Builder(Zone* zone, size_t return_count, size_t parameter_count)
- : return_count_(return_count),
- parameter_count_(parameter_count),
- zone_(zone),
- rcursor_(0),
- pcursor_(0),
- buffer_(zone->NewArray<T>(
- static_cast<int>(return_count + parameter_count))) {}
-
- const size_t return_count_;
- const size_t parameter_count_;
-
- void AddReturn(T val) {
- DCHECK(rcursor_ < return_count_);
- buffer_[rcursor_++] = val;
- }
- void AddParam(T val) {
- DCHECK(pcursor_ < parameter_count_);
- buffer_[return_count_ + pcursor_++] = val;
- }
- Signature<T>* Build() {
- DCHECK(rcursor_ == return_count_);
- DCHECK(pcursor_ == parameter_count_);
- return new (zone_) Signature<T>(return_count_, parameter_count_, buffer_);
- }
-
- private:
- Zone* zone_;
- size_t rcursor_;
- size_t pcursor_;
- T* buffer_;
- };
-
- protected:
- size_t return_count_;
- size_t parameter_count_;
- T* reps_;
-};
-
-typedef Signature<MachineType> MachineSignature;
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_MACHINE_TYPE_H_
diff --git a/src/compiler/mips/OWNERS b/src/compiler/mips/OWNERS
index 5508ba6..89455a4 100644
--- a/src/compiler/mips/OWNERS
+++ b/src/compiler/mips/OWNERS
@@ -3,3 +3,4 @@
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index dd92837..75e4b9e 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/osr.h"
#include "src/mips/macro-assembler-mips.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -35,16 +35,16 @@
// Adds Mips-specific methods to convert InstructionOperands.
-class MipsOperandConverter FINAL : public InstructionOperandConverter {
+class MipsOperandConverter final : public InstructionOperandConverter {
public:
MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- FloatRegister OutputSingleRegister(int index = 0) {
+ FloatRegister OutputSingleRegister(size_t index = 0) {
return ToSingleRegister(instr_->OutputAt(index));
}
- FloatRegister InputSingleRegister(int index) {
+ FloatRegister InputSingleRegister(size_t index) {
return ToSingleRegister(instr_->InputAt(index));
}
@@ -54,7 +54,19 @@
return ToDoubleRegister(op);
}
- Operand InputImmediate(int index) {
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
+ Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
case Constant::kInt32:
@@ -79,7 +91,7 @@
return Operand(zero_reg);
}
- Operand InputOperand(int index) {
+ Operand InputOperand(size_t index) {
InstructionOperand* op = instr_->InputAt(index);
if (op->IsRegister()) {
return Operand(ToRegister(op));
@@ -87,8 +99,8 @@
return InputImmediate(index);
}
- MemOperand MemoryOperand(int* first_index) {
- const int index = *first_index;
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
break;
@@ -103,33 +115,31 @@
return MemOperand(no_reg);
}
- MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }
+ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
MemOperand ToMemOperand(InstructionOperand* op) const {
- DCHECK(op != NULL);
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
-static inline bool HasRegisterInput(Instruction* instr, int index) {
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsRegister();
}
namespace {
-class OutOfLineLoadSingle FINAL : public OutOfLineCode {
+class OutOfLineLoadSingle final : public OutOfLineCode {
public:
OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
__ Move(result_, std::numeric_limits<float>::quiet_NaN());
}
@@ -138,12 +148,12 @@
};
-class OutOfLineLoadDouble FINAL : public OutOfLineCode {
+class OutOfLineLoadDouble final : public OutOfLineCode {
public:
OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
__ Move(result_, std::numeric_limits<double>::quiet_NaN());
}
@@ -152,12 +162,12 @@
};
-class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+class OutOfLineLoadInteger final : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL { __ mov(result_, zero_reg); }
+ void Generate() final { __ mov(result_, zero_reg); }
private:
Register const result_;
@@ -169,7 +179,7 @@
OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
// Handle rounding to zero case where sign has to be preserved.
// High bits of double input already in kScratchReg.
__ srl(at, kScratchReg, 31);
@@ -182,25 +192,145 @@
};
-class OutOfLineTruncate FINAL : public OutOfLineRound {
+class OutOfLineRound32 : public OutOfLineCode {
public:
- OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
+ OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final {
+ // Handle rounding to zero case where sign has to be preserved.
+ // High bits of float input already in kScratchReg.
+ __ srl(at, kScratchReg, 31);
+ __ sll(at, at, 31);
+ __ mtc1(at, result_);
+ }
+
+ private:
+ DoubleRegister const result_;
};
-class OutOfLineFloor FINAL : public OutOfLineRound {
+class OutOfLineRecordWrite final : public OutOfLineCode {
public:
- OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+    // and restore ra properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ Addu(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
};
-class OutOfLineCeil FINAL : public OutOfLineRound {
- public:
- OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
-};
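
The two mode_ guards in OutOfLineRecordWrite::Generate() compose by mode; a
minimal sketch, assuming the conventional RecordWriteMode ordering
kValueIsMap < kValueIsPointer < kValueIsAny (inferred from the comparisons,
not stated in this patch):

  enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

  // Mirrors the guards above: which cheap filters run before the stub call.
  bool SmiFilterRuns(RecordWriteMode m) {
    return m > RecordWriteMode::kValueIsPointer;  // only kValueIsAny
  }
  bool PageFlagFilterRuns(RecordWriteMode m) {
    return m > RecordWriteMode::kValueIsMap;      // pointer or any
  }
  // kValueIsMap thus takes no shortcut and always reaches RecordWriteStub.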
+Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+
+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
+ switch (condition) {
+ case kNotEqual:
+ return ne;
+ case kEqual:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+
+FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
+ FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ predicate = true;
+ return EQ;
+ case kNotEqual:
+ predicate = false;
+ return EQ;
+ case kUnsignedLessThan:
+ predicate = true;
+ return OLT;
+ case kUnsignedGreaterThanOrEqual:
+ predicate = false;
+ return ULT;
+ case kUnsignedLessThanOrEqual:
+ predicate = true;
+ return OLE;
+ case kUnsignedGreaterThan:
+ predicate = false;
+ return ULE;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ predicate = true;
+ break;
+ default:
+ predicate = true;
+ break;
+ }
+ UNREACHABLE();
+ return kNoFPUCondition;
+}
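
The bool out-parameter records whether the eventual branch tests the FPU flag
set or clear; MIPS has no direct not-equal FPU condition, so kNotEqual is
encoded as the negation of EQ. A call sketch (editor's illustration):

  bool predicate;
  FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, kNotEqual);
  // cc == EQ with predicate == false: emit c.eq.d, then branch with bc1f
  // (flag clear) instead of bc1t (flag set).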
} // namespace
@@ -212,8 +342,8 @@
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ addu(at, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(at, 0)); \
+ __ addu(kScratchReg, i.InputRegister(2), offset); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
__ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
@@ -230,8 +360,8 @@
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ addu(at, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(at, 0)); \
+ __ addu(kScratchReg, i.InputRegister(2), offset); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
__ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
@@ -248,8 +378,8 @@
auto offset = i.InputRegister(0); \
auto value = i.Input##width##Register(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ addu(at, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(at, 0)); \
+ __ addu(kScratchReg, i.InputRegister(3), offset); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
auto value = i.Input##width##Register(2); \
@@ -267,8 +397,8 @@
auto offset = i.InputRegister(0); \
auto value = i.InputRegister(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ addu(at, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(at, 0)); \
+ __ addu(kScratchReg, i.InputRegister(3), offset); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
auto value = i.InputRegister(2); \
@@ -279,10 +409,15 @@
} while (0)
-#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
- do { \
- auto ool = \
- new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
+ if (IsMipsArchVariant(kMips32r6)) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister()); \
Label done; \
__ Mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
__ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
@@ -290,14 +425,63 @@
__ Branch(USE_DELAY_SLOT, &done, hs, at, \
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
__ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
__ Move(at, kScratchReg2, i.OutputDoubleRegister()); \
__ or_(at, at, kScratchReg2); \
__ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
__ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
__ bind(ool->exit()); \
__ bind(&done); \
- } while (0)
+ }
+
+
+#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode) \
+ if (IsMipsArchVariant(kMips32r6)) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ int32_t kFloat32ExponentBias = 127; \
+ int32_t kFloat32MantissaBits = 23; \
+ int32_t kFloat32ExponentBits = 8; \
+ auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
+ Label done; \
+ __ mfc1(kScratchReg, i.InputDoubleRegister(0)); \
+ __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits); \
+ __ Branch(USE_DELAY_SLOT, &done, hs, at, \
+ Operand(kFloat32ExponentBias + kFloat32MantissaBits)); \
+ __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mfc1(at, i.OutputDoubleRegister()); \
+ __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
+ __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
+ __ bind(ool->exit()); \
+ __ bind(&done); \
+ }
+
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ addiu(sp, sp, sp_slot_delta * kPointerSize);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Subu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
// Assembles an instruction after register allocation, producing machine code.
@@ -315,7 +499,21 @@
__ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(at);
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallCodeObject: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ } else {
+ __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(at);
+ }
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -329,37 +527,115 @@
__ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+ }
+
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Jump(at);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ break;
+ }
case kArchRet:
AssembleReturn();
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), sp);
break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ Addu(at, object, index);
+ __ sw(value, MemOperand(at));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
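The CheckPageFlag test above keeps the common case to a store plus a flag
test: the out-of-line RecordWrite only runs when the page holding the object
is marked as possibly containing interesting outgoing pointers. A sketch of
the filter (the flag bit value here is illustrative, not V8's actual
constant):

  #include <cstdint>

  bool TakesSlowRecordWritePath(uintptr_t page_flags) {
    const uintptr_t kPointersFromHereAreInterestingMask = 1u << 1;  // illustrative
    return (page_flags & kPointersFromHereAreInterestingMask) != 0;
  }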
case kMipsAdd:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMipsAddOvf:
- __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), kCompareReg, kScratchReg);
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
break;
case kMipsSub:
__ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMipsSubOvf:
- __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), kCompareReg, kScratchReg);
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
break;
case kMipsMul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -372,9 +648,19 @@
break;
case kMipsDiv:
__ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMipsDivU:
__ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMipsMod:
__ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -388,9 +674,20 @@
case kMipsOr:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMipsNor:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK(i.InputOperand(1).immediate() == 0);
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
case kMipsXor:
__ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMipsClz:
+ __ Clz(i.OutputRegister(), i.InputRegister(0));
+ break;
case kMipsShl:
if (instr->InputAt(1)->IsRegister()) {
__ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -415,6 +712,18 @@
__ sra(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
+ case kMipsExt:
+ __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ break;
+ case kMipsIns:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
+ } else {
+ __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
+ break;
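Reference semantics for the two bitfield instructions handled above, as a
sketch (the ISA requires pos + size <= 32; the helper names are ours):

  #include <cstdint>

  // Ext: extract `size` bits of rs starting at bit `pos` into the low bits.
  uint32_t Ext(uint32_t rs, int pos, int size) {
    uint32_t mask = (size == 32) ? 0xffffffffu : ((1u << size) - 1u);
    return (rs >> pos) & mask;
  }

  // Ins: replace bits [pos, pos + size) of rt with the low `size` bits of rs.
  uint32_t Ins(uint32_t rt, uint32_t rs, int pos, int size) {
    uint32_t mask = ((size == 32) ? 0xffffffffu : ((1u << size) - 1u)) << pos;
    return (rt & ~mask) | ((rs << pos) & mask);
  }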
case kMipsRor:
__ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -434,6 +743,56 @@
}
break;
+ case kMipsCmpS:
+      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
+ break;
+ case kMipsAddS:
+ // TODO(plind): add special case: combine mult & add.
+ __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsSubS:
+ __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsMulS:
+ // TODO(plind): add special case: right op is -1.0, see arm port.
+ __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsDivS:
+ __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsModS: {
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ FrameScope scope(masm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2, kScratchReg);
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+    // Move the result from the FP result register into the output.
+ __ MovFromFloatResult(i.OutputSingleRegister());
+ break;
+ }
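The runtime call above is, in effect, plain C fmod on doubles; float32 inputs
are widened because no float32 entry point exists yet (see the TODO). A sketch
of the callee's behavior (the name is illustrative):

  #include <cmath>

  double ModTwoDoubles(double x, double y) { return std::fmod(x, y); }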
+ case kMipsAbsS:
+ __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kMipsSqrtS: {
+ __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kMipsMaxS:
+ __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsMinS:
+ __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kMipsCmpD:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
@@ -468,20 +827,107 @@
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
- case kMipsFloat64Floor: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+ case kMipsAbsD:
+ __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMipsSqrtD: {
+ __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kMipsFloat64Ceil: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+ case kMipsMaxD:
+ __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsMinD:
+ __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsFloat64RoundDown: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
+ break;
+ }
+ case kMipsFloat32RoundDown: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
break;
}
case kMipsFloat64RoundTruncate: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
break;
}
- case kMipsSqrtD: {
- __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ case kMipsFloat32RoundTruncate: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
+ break;
+ }
+ case kMipsFloat64RoundUp: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
+ break;
+ }
+ case kMipsFloat32RoundUp: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
+ break;
+ }
+ case kMipsFloat64RoundTiesEven: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
+ break;
+ }
+ case kMipsFloat32RoundTiesEven: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
+ break;
+ }
+ case kMipsFloat64Max: {
+ // (b < a) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      // Left operand is the result; pass through if false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMipsFloat64Min: {
+ // (a < b) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+      // Right operand is the result; pass through if false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMipsFloat32Max: {
+ // (b < a) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      // Left operand is the result; pass through if false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMipsFloat32Min: {
+ // (a < b) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+      // Right operand is the result; pass through if false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
break;
}
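Reference semantics of the four select sequences above, as a sketch. Like the
emitted code, these are plain comparison-selects and do not implement the
IEEE-754 minNum/maxNum NaN rules:

  double Float64Max(double a, double b) { return (b < a) ? a : b; }
  double Float64Min(double a, double b) { return (a < b) ? a : b; }
  float Float32Max(float a, float b) { return (b < a) ? a : b; }
  float Float32Min(float a, float b) { return (a < b) ? a : b; }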
case kMipsCvtSD: {
@@ -498,11 +944,35 @@
__ cvt_d_w(i.OutputDoubleRegister(), scratch);
break;
}
+ case kMipsCvtSW: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ mtc1(i.InputRegister(0), scratch);
+ __ cvt_s_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
case kMipsCvtDUw: {
FPURegister scratch = kScratchDoubleReg;
__ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
break;
}
+ case kMipsFloorWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ floor_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsCeilWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsRoundWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
case kMipsTruncWD: {
FPURegister scratch = kScratchDoubleReg;
// Other arches use round to zero here, so we follow.
@@ -510,12 +980,48 @@
__ mfc1(i.OutputRegister(), scratch);
break;
}
+ case kMipsFloorWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ floor_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsCeilWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsRoundWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsTruncWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ trunc_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
case kMipsTruncUwD: {
FPURegister scratch = kScratchDoubleReg;
// TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
+ case kMipsFloat64ExtractLowWord32:
+ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMipsFloat64ExtractHighWord32:
+ __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMipsFloat64InsertLowWord32:
+ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ case kMipsFloat64InsertHighWord32:
+ __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
// ... more basic instructions ...
case kMipsLbu:
@@ -547,7 +1053,7 @@
break;
}
case kMipsSwc1: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ swc1(i.InputSingleRegister(index), operand);
break;
@@ -559,28 +1065,26 @@
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMipsPush:
- __ Push(i.InputRegister(0));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Subu(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ __ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
break;
case kMipsStackClaim: {
- int words = MiscField::decode(instr->opcode());
- __ Subu(sp, sp, Operand(words << kPointerSizeLog2));
+ __ Subu(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
break;
}
case kMipsStoreToStackSlot: {
- int slot = MiscField::decode(instr->opcode());
- __ sw(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
- break;
- }
- case kMipsStoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ addu(index, object, index);
- __ sw(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RAStatus ra_status = kRAHasNotBeenSaved;
- __ RecordWrite(object, index, value, ra_status, mode);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
break;
}
case kCheckedLoadInt8:
@@ -619,8 +1123,12 @@
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
break;
+ case kCheckedLoadWord64:
+ case kCheckedStoreWord64:
+ UNREACHABLE(); // currently unsupported checked int64 load/store.
+ break;
}
-}
+} // NOLINT(readability/fn_size)
#define UNSUPPORTED_COND(opcode, condition) \
@@ -628,137 +1136,113 @@
out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
UNIMPLEMENTED();
+static bool convertCondition(FlagsCondition condition, Condition& cc) {
+ switch (condition) {
+ case kEqual:
+ cc = eq;
+ return true;
+ case kNotEqual:
+ cc = ne;
+ return true;
+ case kUnsignedLessThan:
+ cc = lt;
+ return true;
+ case kUnsignedGreaterThanOrEqual:
+ cc = uge;
+ return true;
+ case kUnsignedLessThanOrEqual:
+ cc = le;
+ return true;
+ case kUnsignedGreaterThan:
+ cc = ugt;
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
MipsOperandConverter i(this, instr);
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
Condition cc = kNoCondition;
-
// MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other arches. The compare operations
  // emit MIPS pseudo-instructions, which are handled here by branch
  // instructions that do the actual comparison. It is essential that the
  // input registers to the compare pseudo-op are not modified before this
  // branch op, as they are tested here.
- // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
- // not separated by other instructions.
if (instr->arch_opcode() == kMipsTst) {
- switch (branch->condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMipsTst, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionTst(branch->condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
-
- } else if (instr->arch_opcode() == kMipsAddOvf ||
- instr->arch_opcode() == kMipsSubOvf) {
- // kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow.
+ } else if (instr->arch_opcode() == kMipsAddOvf) {
switch (branch->condition) {
case kOverflow:
- cc = lt;
+ __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
break;
case kNotOverflow:
- cc = ge;
+ __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
break;
default:
UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
break;
}
- __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
-
+ } else if (instr->arch_opcode() == kMipsSubOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+        UNSUPPORTED_COND(kMipsSubOvf, branch->condition);
+ break;
+ }
} else if (instr->arch_opcode() == kMipsCmp) {
- switch (branch->condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- default:
- UNSUPPORTED_COND(kMipsCmp, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
-
- } else if (instr->arch_opcode() == kMipsCmpD) {
- // TODO(dusmil) optimize unordered checks to use fewer instructions
- // even if we have to unfold BranchF macro.
- Label* nan = flabel;
- switch (branch->condition) {
- case kUnorderedEqual:
- cc = eq;
- break;
- case kUnorderedNotEqual:
- cc = ne;
- nan = tlabel;
- break;
- case kUnorderedLessThan:
- cc = lt;
- break;
- case kUnorderedGreaterThanOrEqual:
- cc = ge;
- nan = tlabel;
- break;
- case kUnorderedLessThanOrEqual:
- cc = le;
- break;
- case kUnorderedGreaterThan:
- cc = gt;
- nan = tlabel;
- break;
- default:
- UNSUPPORTED_COND(kMipsCmpD, branch->condition);
- break;
+ } else if (instr->arch_opcode() == kMipsCmpS) {
+ if (!convertCondition(branch->condition, cc)) {
+      UNSUPPORTED_COND(kMipsCmpS, branch->condition);
}
- __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
-
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF32(tlabel, nullptr, cc, left, right);
+ } else if (instr->arch_opcode() == kMipsCmpD) {
+ if (!convertCondition(branch->condition, cc)) {
+      UNSUPPORTED_COND(kMipsCmpD, branch->condition);
+ }
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF64(tlabel, nullptr, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
UNIMPLEMENTED();
}
+ if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}
@@ -772,224 +1256,291 @@
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label false_value;
- DCHECK_NE(0, instr->OutputCount());
+ DCHECK_NE(0u, instr->OutputCount());
Register result = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
-
// MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other arches. The compare operations
  // emit MIPS pseudo-instructions, which are checked and handled here.
- // For materializations, we use delay slot to set the result true, and
- // in the false case, where we fall thru the branch, we reset the result
- // false.
-
- // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
- // not separated by other instructions.
if (instr->arch_opcode() == kMipsTst) {
- switch (condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMipsTst, condition);
- break;
+ cc = FlagsConditionToConditionTst(condition);
+ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
+ __ Sltu(result, zero_reg, kScratchReg);
+ if (cc == eq) {
+      // Sltu produces 0 for equality; invert the result.
+ __ xori(result, result, 1);
}
- __ And(at, i.InputRegister(0), i.InputOperand(1));
- __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
- __ li(result, Operand(1)); // In delay slot.
-
+ return;
} else if (instr->arch_opcode() == kMipsAddOvf ||
instr->arch_opcode() == kMipsSubOvf) {
- // kMipsAddOvf, SubOvf emits negative result to 'kCompareReg' on overflow.
- switch (condition) {
- case kOverflow:
- cc = lt;
+ Label flabel, tlabel;
+ switch (instr->arch_opcode()) {
+ case kMipsAddOvf:
+ __ AddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+
break;
- case kNotOverflow:
- cc = ge;
+ case kMipsSubOvf:
+ __ SubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
break;
default:
- UNSUPPORTED_COND(kMipsAddOvf, condition);
+ UNREACHABLE();
break;
}
- __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
- __ li(result, Operand(1)); // In delay slot.
-
-
+ __ li(result, 1);
+ __ Branch(&tlabel);
+ __ bind(&flabel);
+ __ li(result, 0);
+ __ bind(&tlabel);
} else if (instr->arch_opcode() == kMipsCmp) {
- Register left = i.InputRegister(0);
- Operand right = i.InputOperand(1);
- switch (condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
+ cc = FlagsConditionToConditionCmp(condition);
+ switch (cc) {
+ case eq:
+ case ne: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ Register select;
+ if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
+ // Pass left operand if right is zero.
+ select = left;
+ } else {
+ __ Subu(kScratchReg, left, right);
+ select = kScratchReg;
+ }
+ __ Sltu(result, zero_reg, select);
+ if (cc == eq) {
+          // Sltu produces 0 for equality; invert the result.
+ __ xori(result, result, 1);
+ }
+ } break;
+ case lt:
+ case ge: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Slt(result, left, right);
+ if (cc == ge) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case gt:
+ case le: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Slt(result, left, right);
+ if (cc == le) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case lo:
+ case hs: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Sltu(result, left, right);
+ if (cc == hs) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case hi:
+ case ls: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Sltu(result, left, right);
+ if (cc == ls) {
+ __ xori(result, result, 1);
+ }
+ } break;
default:
- UNSUPPORTED_COND(kMipsCmp, condition);
- break;
+ UNREACHABLE();
}
- __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
- __ li(result, Operand(1)); // In delay slot.
-
- } else if (instr->arch_opcode() == kMipsCmpD) {
- FPURegister left = i.InputDoubleRegister(0);
- FPURegister right = i.InputDoubleRegister(1);
- // TODO(plind): Provide NaN-testing macro-asm function without need for
- // BranchF.
- FPURegister dummy1 = f0;
- FPURegister dummy2 = f2;
- switch (condition) {
- case kUnorderedEqual:
- // TODO(plind): improve the NaN testing throughout this function.
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = eq;
- break;
- case kUnorderedNotEqual:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = ne;
- break;
- case kUnorderedLessThan:
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = lt;
- break;
- case kUnorderedGreaterThanOrEqual:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = ge;
- break;
- case kUnorderedLessThanOrEqual:
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = le;
- break;
- case kUnorderedGreaterThan:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = gt;
- break;
- default:
- UNSUPPORTED_COND(kMipsCmp, condition);
- break;
+ return;
+ } else if (instr->arch_opcode() == kMipsCmpD ||
+ instr->arch_opcode() == kMipsCmpS) {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
}
- __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
- __ li(result, Operand(1)); // In delay slot - branch taken returns 1.
- // Fall-thru (branch not taken) returns 0.
-
+ bool predicate;
+ FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ __ li(result, Operand(1));
+ if (instr->arch_opcode() == kMipsCmpD) {
+ __ c(cc, D, left, right);
+ } else {
+ DCHECK(instr->arch_opcode() == kMipsCmpS);
+ __ c(cc, S, left, right);
+ }
+ if (predicate) {
+ __ Movf(result, zero_reg);
+ } else {
+ __ Movt(result, zero_reg);
+ }
+ } else {
+ if (instr->arch_opcode() == kMipsCmpD) {
+ __ cmp(cc, L, kDoubleCompareReg, left, right);
+ } else {
+ DCHECK(instr->arch_opcode() == kMipsCmpS);
+ __ cmp(cc, W, kDoubleCompareReg, left, right);
+ }
+ __ mfc1(result, kDoubleCompareReg);
+ __ andi(result, result, 1); // Cmp returns all 1's/0's, use only LSB.
+ if (!predicate) // Toggle result for not equal.
+ __ xori(result, result, 1);
+ }
+ return;
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
instr->arch_opcode());
TRACE_UNIMPL();
UNIMPLEMENTED();
}
- // Fallthru case is the false materialization.
- __ bind(&false_value);
- __ li(result, Operand(0));
- __ bind(&done);
}
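The branchless materialization used throughout the function above reduces to
two idioms, sketched here: Sltu against zero tests for non-zero, and xori by 1
inverts a 0/1 value:

  #include <cstdint>

  uint32_t IsNonZero(uint32_t x) { return x != 0 ? 1u : 0u; }  // Sltu(result, zero_reg, x)
  uint32_t IsZero(uint32_t x) { return IsNonZero(x) ^ 1u; }    // ...then xori(result, result, 1)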
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ li(at, Operand(i.InputInt32(index + 0)));
+ __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
+ }
+ __ nop(); // Branch delay slot of the last beq.
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+ Label here;
+ __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
+ __ BlockTrampolinePoolFor(case_count + 6);
+ __ bal(&here);
+ __ sll(at, input, 2); // Branch delay slot.
+ __ bind(&here);
+ __ addu(at, at, ra);
+ __ lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+ __ jr(at);
+ __ nop(); // Branch delay slot nop.
+ for (size_t index = 0; index < case_count; ++index) {
+ __ dd(GetLabel(i.InputRpo(index + 2)));
+ }
+}
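The inline jump table relies on bal to materialize the current pc: ra ends up
holding the address of the `here` label, and the four instructions between
that label and the table (addu, lw, jr, nop) account for the 4 * kInstrSize
skew in the load. A sketch of the address arithmetic, assuming 4-byte
instructions:

  #include <cstdint>

  uintptr_t CaseTargetSlot(uintptr_t ra, uint32_t input) {
    const int kInstrSize = 4;
    uintptr_t at = ra + (input << 2);  // sll(at, input, 2); addu(at, at, ra)
    return at + 4 * kInstrSize;        // lw(at, MemOperand(at, 4 * kInstrSize))
  }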
+
+
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
+ if (descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
__ mov(fp, sp);
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- // TODO(plind): make callee save size const, possibly DCHECK it.
- int register_save_area_size = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- register_save_area_size += kPointerSize;
- }
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
- __ MultiPush(saves);
- }
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(0);
}
- int stack_slots = frame()->GetSpillSlotCount();
- if (stack_slots > 0) {
- __ Subu(sp, sp, Operand(stack_slots * kPointerSize));
+ frame_access_state()->SetFrameAccessToDefault();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+ }
+ if (stack_shrink_slots > 0) {
+ __ Subu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ }
+
+ // Save callee-saved FPU registers.
+ if (saves_fpu != 0) {
+ __ MultiPushFPU(saves_fpu);
+ int count = base::bits::CountPopulation32(saves_fpu);
+ DCHECK(kNumCalleeSavedFPU == count);
+ frame()->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kPointerSize));
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ // kNumCalleeSaved includes the fp register, but the fp register
+ // is saved separately in TF.
+ int count = base::bits::CountPopulation32(saves);
+ DCHECK(kNumCalleeSaved == count + 1);
+ frame()->AllocateSavedCalleeRegisterSlots(count);
}
}
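The save-area accounting above relies on a RegList being a plain bitmask, so
slot counts are population counts; each FPU register takes two pointer-sized
slots on MIPS32 (kDoubleSize / kPointerSize == 2). A sketch, assuming a 32-bit
RegList:

  #include <cstdint>

  int SavedSlotCount(uint32_t reg_list, bool fpu_regs) {
    int count = 0;
    for (uint32_t bits = reg_list; bits != 0; bits &= bits - 1) ++count;  // popcount
    return fpu_regs ? count * 2 : count;
  }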
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
- if (stack_slots > 0) {
- __ Addu(sp, sp, Operand(stack_slots * kPointerSize));
- }
- // Restore registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) {
- __ MultiPop(saves);
- }
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+
+ // Restore GP registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore FPU registers.
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ __ MultiPopFPU(saves_fpu);
+ }
+
+ if (descriptor->IsCFunctionCall()) {
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+ } else if (frame()->needs_frame()) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
}
- __ mov(sp, fp);
- __ Pop(ra, fp);
- __ Ret();
- } else {
- __ mov(sp, fp);
- __ Pop(ra, fp);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
+ }
+ if (pop_count != 0) {
__ DropAndRet(pop_count);
+ } else {
+ __ Ret();
}
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1031,9 +1582,19 @@
case Constant::kExternalReference:
__ li(dst, Operand(src.ToExternalReference()));
break;
- case Constant::kHeapObject:
- __ li(dst, src.ToHeapObject());
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ int offset;
+ if (IsMaterializableFromFrame(src_object, &offset)) {
+ __ lw(dst, MemOperand(fp, offset));
+ } else if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ li(dst, src_object);
+ }
break;
+ }
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
break;
@@ -1085,7 +1646,7 @@
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1134,9 +1695,9 @@
Register temp_0 = kScratchReg;
FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
- MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
+ MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst0 = g.ToMemOperand(destination);
- MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
__ ldc1(temp_1, dst0); // Save destination in temp_1.
__ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
__ sw(temp_0, dst0);
@@ -1150,6 +1711,12 @@
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 32-bit MIPS we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() {
  // Unused on 32-bit MIPS. Still exists on 64-bit ARM.
// TODO(plind): Unclear when this is called now. Understand, fix if needed.
@@ -1158,24 +1725,25 @@
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- // Block tramoline pool emission for duration of padding.
- v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= v8::internal::Assembler::kInstrSize;
- }
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+    // Block trampoline pool emission for the duration of the padding.
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
}
}
- MarkLazyDeoptSite();
}
#undef __
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index 3aa508f..c938177 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -25,30 +25,60 @@
V(MipsModU) \
V(MipsAnd) \
V(MipsOr) \
+ V(MipsNor) \
V(MipsXor) \
+ V(MipsClz) \
V(MipsShl) \
V(MipsShr) \
V(MipsSar) \
+ V(MipsExt) \
+ V(MipsIns) \
V(MipsRor) \
V(MipsMov) \
V(MipsTst) \
V(MipsCmp) \
+ V(MipsCmpS) \
+ V(MipsAddS) \
+ V(MipsSubS) \
+ V(MipsMulS) \
+ V(MipsDivS) \
+ V(MipsModS) \
+ V(MipsAbsS) \
+ V(MipsSqrtS) \
+ V(MipsMaxS) \
+ V(MipsMinS) \
V(MipsCmpD) \
V(MipsAddD) \
V(MipsSubD) \
V(MipsMulD) \
V(MipsDivD) \
V(MipsModD) \
+ V(MipsAbsD) \
V(MipsSqrtD) \
- V(MipsFloat64Floor) \
- V(MipsFloat64Ceil) \
+ V(MipsMaxD) \
+ V(MipsMinD) \
+ V(MipsFloat32RoundDown) \
+ V(MipsFloat32RoundTruncate) \
+ V(MipsFloat32RoundUp) \
+ V(MipsFloat32RoundTiesEven) \
+ V(MipsFloat64RoundDown) \
V(MipsFloat64RoundTruncate) \
+ V(MipsFloat64RoundUp) \
+ V(MipsFloat64RoundTiesEven) \
V(MipsCvtSD) \
V(MipsCvtDS) \
V(MipsTruncWD) \
+ V(MipsRoundWD) \
+ V(MipsFloorWD) \
+ V(MipsCeilWD) \
+ V(MipsTruncWS) \
+ V(MipsRoundWS) \
+ V(MipsFloorWS) \
+ V(MipsCeilWS) \
V(MipsTruncUwD) \
V(MipsCvtDW) \
V(MipsCvtDUw) \
+ V(MipsCvtSW) \
V(MipsLb) \
V(MipsLbu) \
V(MipsSb) \
@@ -61,10 +91,17 @@
V(MipsSwc1) \
V(MipsLdc1) \
V(MipsSdc1) \
+ V(MipsFloat64ExtractLowWord32) \
+ V(MipsFloat64ExtractHighWord32) \
+ V(MipsFloat64InsertLowWord32) \
+ V(MipsFloat64InsertHighWord32) \
+ V(MipsFloat64Max) \
+ V(MipsFloat64Min) \
+ V(MipsFloat32Max) \
+ V(MipsFloat32Min) \
V(MipsPush) \
V(MipsStoreToStackSlot) \
- V(MipsStackClaim) \
- V(MipsStoreWriteBarrier)
+ V(MipsStackClaim)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/src/compiler/mips/instruction-scheduler-mips.cc b/src/compiler/mips/instruction-scheduler-mips.cc
new file mode 100644
index 0000000..af86a87
--- /dev/null
+++ b/src/compiler/mips/instruction-scheduler-mips.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNIMPLEMENTED();
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNIMPLEMENTED();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index 5e8e3b1..61cea76 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -17,12 +19,12 @@
// Adds Mips-specific methods for generating InstructionOperands.
-class MipsOperandGenerator FINAL : public OperandGenerator {
+class MipsOperandGenerator final : public OperandGenerator {
public:
explicit MipsOperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
if (CanBeImmediate(node, opcode)) {
return UseImmediate(node);
}
@@ -42,11 +44,10 @@
return is_uint16(value);
case kMipsLdc1:
case kMipsSdc1:
- case kCheckedLoadFloat32:
case kCheckedLoadFloat64:
- case kCheckedStoreFloat32:
case kCheckedStoreFloat64:
- return is_int16(value + kIntSize);
+ return std::numeric_limits<int16_t>::min() <= (value + kIntSize) &&
+ std::numeric_limits<int16_t>::max() >= (value + kIntSize);
default:
return is_int16(value);
}
@@ -90,9 +91,9 @@
InstructionCode opcode, FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
inputs[input_count++] = g.UseRegister(m.left().node());
@@ -108,14 +109,13 @@
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -127,32 +127,32 @@
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kMipsLwc1;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kMipsLdc1;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeUint32 ? kMipsLbu : kMipsLb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
break;
- case kRepWord16:
- opcode = typ == kTypeUint32 ? kMipsLhu : kMipsLh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kMipsLw;
break;
- default:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -161,7 +161,7 @@
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else {
- InstructionOperand* addr_reg = g.TempRegister();
+ InstructionOperand addr_reg = g.TempRegister();
Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
@@ -177,59 +177,126 @@
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
- Emit(kMipsStoreWriteBarrier, NULL, g.UseFixed(base, t0),
- g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kMipsSwc1;
- break;
- case kRepFloat64:
- opcode = kMipsSdc1;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kMipsSb;
- break;
- case kRepWord16:
- opcode = kMipsSh;
- break;
- case kRepTagged: // Fall through.
- case kRepWord32:
- opcode = kMipsSw;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ // TODO(mips): I guess this could be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- InstructionOperand* addr_reg = g.TempRegister();
- Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
- g.TempImmediate(0), g.UseRegister(value));
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kMipsSwc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMipsSdc1;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kMipsSb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMipsSh;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kMipsSw;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
}
}
void InstructionSelector::VisitWord32And(Node* node) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+ // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().Value() & 0x1f;
+
+ // Ext cannot extract bits past the register size, however since
+ // shifting the original value would have introduced some zeros we can
+ // still use Ext with a smaller mask and the remaining bits will be
+ // zeros.
+ if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+ Emit(kMipsExt, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t shift = base::bits::CountPopulation32(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+ if (shift != 0 && shift != 32 && msb + shift == 32) {
+      // Insert zeros for the (x >> K) << K => x & ~(2^K - 1) reduction and
+      // avoid loading the inverted mask as a constant.
+ Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0), g.TempImmediate(shift));
+ return;
+ }
+ }
VisitBinop(this, node, kMipsAnd);
}
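A worked instance of the Ext match above: for And(Shr(x, 4), 0xfff), the mask
0xfff gives mask_width == 12 and mask_msb == 20, so mask_msb + mask_width == 32
and lsb == 4; the pair collapses into a single Ext(x, 4, 12). In plain C++
terms:

  #include <cstdint>

  uint32_t ShrThenAnd(uint32_t x) {
    return (x >> 4) & 0xfffu;  // selected as Ext(x, pos=4, size=12)
  }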
@@ -240,16 +307,81 @@
void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasValue()) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsNor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ MipsOperandGenerator g(this);
+ Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
VisitBinop(this, node, kMipsXor);
}
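Both Nor selections implement bitwise negation without loading a constant,
using nor(a, b) == ~(a | b): Xor(x, -1) becomes Nor(x, 0) and
Xor(Or(a, b), -1) becomes a single Nor(a, b). Sketch:

  #include <cstdint>

  uint32_t NotViaNor(uint32_t x) { return ~(x | 0u); }           // Nor(x, zero_reg)
  uint32_t NorFold(uint32_t a, uint32_t b) { return ~(a | b); }  // Xor(Or(a, b), -1)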
void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+  // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
+  // contiguous and the shift immediate is non-zero.
+ if (mleft.right().HasValue()) {
+ uint32_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kMipsShl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kMipsShl, node);
}
void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x1f;
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
+ // shifted into the least-significant bits.
+ uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_msb + mask_width + lsb) == 32) {
+ MipsOperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kMipsExt, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMipsShr, node);
}
@@ -264,6 +396,17 @@
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ VisitRR(this, kMipsClz, node);
+}
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
MipsOperandGenerator g(this);
@@ -289,7 +432,7 @@
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
@@ -298,7 +441,7 @@
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value + 1)));
@@ -307,15 +450,12 @@
return;
}
}
- Emit(kMipsMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.UseRegister(m.right().node()));
+ VisitRRR(this, kMipsMul, node);
}
void InstructionSelector::VisitInt32MulHigh(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsMulHigh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
+ VisitRRR(this, kMipsMulHigh, node);
}
@@ -329,7 +469,7 @@
void InstructionSelector::VisitInt32Div(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMipsDiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -337,7 +477,7 @@
void InstructionSelector::VisitUint32Div(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMipsDivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -359,39 +499,130 @@
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsCvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMipsCvtDS, node);
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsCvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMipsCvtDW, node);
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsCvtDUw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMipsCvtDUw, node);
}
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
MipsOperandGenerator g(this);
- Emit(kMipsTruncWD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ Node* value = node->InputAt(0);
+ // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
+ // which does rounding and conversion to integer format.
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kFloat64RoundDown:
+ Emit(kMipsFloorWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundUp:
+ Emit(kMipsCeilWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTiesEven:
+ Emit(kMipsRoundWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTruncate:
+ Emit(kMipsTruncWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ default:
+ break;
+ }
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (CanCover(value, next)) {
+ // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
+ switch (next->opcode()) {
+ case IrOpcode::kFloat32RoundDown:
+ Emit(kMipsFloorWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundUp:
+ Emit(kMipsCeilWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTiesEven:
+ Emit(kMipsRoundWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTruncate:
+ Emit(kMipsTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ default:
+ Emit(kMipsTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ } else {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kMipsTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
+ VisitRR(this, kMipsTruncWD, node);
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsTruncUwD, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMipsTruncUwD, node);
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
MipsOperandGenerator g(this);
- Emit(kMipsCvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ Node* value = node->InputAt(0);
+ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
+ // instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kMipsCvtSW, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ VisitRR(this, kMipsCvtSD, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kMipsTruncWD, node);
+ }
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kMipsFloat64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kMipsAddS, node);
}
@@ -400,16 +631,45 @@
}
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ VisitRRR(this, kMipsSubS, node);
+}
+
+
void InstructionSelector::VisitFloat64Sub(Node* node) {
+ MipsOperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kMipsFloat64RoundUp, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
VisitRRR(this, kMipsSubD, node);
}
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kMipsMulS, node);
+}
+
+
void InstructionSelector::VisitFloat64Mul(Node* node) {
VisitRRR(this, kMipsMulD, node);
}
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kMipsDivS, node);
+}
+
+
void InstructionSelector::VisitFloat64Div(Node* node) {
VisitRRR(this, kMipsDivD, node);
}
@@ -422,19 +682,108 @@
}
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+void InstructionSelector::VisitFloat32Max(Node* node) {
MipsOperandGenerator g(this);
- Emit(kMipsSqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat32Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat32Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- VisitRR(this, kMipsFloat64Floor, node);
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat64Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
}
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- VisitRR(this, kMipsFloat64Ceil, node);
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat32Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat32Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat64Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kMipsAbsS, node);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kMipsAbsD, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kMipsSqrtS, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRR(this, kMipsSqrtD, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kMipsFloat32RoundDown, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kMipsFloat64RoundDown, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kMipsFloat32RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kMipsFloat64RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kMipsFloat32RoundTruncate, node);
}
@@ -448,96 +797,94 @@
}
-void InstructionSelector::VisitCall(Node* node) {
- MipsOperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = NULL;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
- // Possibly align stack here for functions.
- int push_count = buffer.pushed_nodes.size();
- if (push_count > 0) {
- Emit(kMipsStackClaim | MiscField::encode(push_count), NULL);
- }
- int slot = buffer.pushed_nodes.size() - 1;
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
- Emit(kMipsStoreToStackSlot | MiscField::encode(slot), NULL,
- g.UseRegister(*input));
- slot--;
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- InstructionOperand** first_output =
- buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
- Instruction* call_instr =
- Emit(opcode, buffer.outputs.size(), first_output,
- buffer.instruction_args.size(), &buffer.instruction_args.front());
- call_instr->MarkAsCall();
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kMipsFloat32RoundTiesEven, node);
}
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kMipsFloat64RoundTiesEven, node);
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ MipsOperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = kCArgSlotCount;
+ for (PushParameter input : (*arguments)) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ // Possibly align stack here for functions.
+ int push_count = static_cast<int>(descriptor->StackParameterCount());
+ if (push_count > 0) {
+ Emit(kMipsStackClaim, g.NoOutput(),
+ g.TempImmediate(push_count << kPointerSizeLog2));
+ }
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(n << kPointerSizeLog2));
+ }
+ }
+ }
+}
+
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
+ InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
- InstructionOperand* length_operand =
- (!g.CanBeImmediate(offset, opcode)) ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
+ InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
+ ? g.CanBeImmediate(length, opcode)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length)
+ : g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), offset_operand, length_operand,
@@ -546,45 +893,46 @@
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
default:
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
+ InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
- InstructionOperand* length_operand =
- (!g.CanBeImmediate(offset, opcode)) ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
+ InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
+ ? g.CanBeImmediate(length, opcode)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length)
+ : g.UseRegister(length);
- Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr, offset_operand,
- length_operand, g.UseRegister(value), g.UseRegister(buffer));
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ offset_operand, length_operand, g.UseRegister(value),
+ g.UseRegister(buffer));
}
@@ -592,29 +940,47 @@
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand* left, InstructionOperand* right,
+ InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
- selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
- // TODO(plind): Revisit and test this path.
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
}
}
-// Shared routine for multiple float compare operations.
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ MipsOperandGenerator g(selector);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMipsCmpS, lhs, rhs, cont);
+}
+
+
+// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kMipsCmpD, g.UseRegister(left), g.UseRegister(right),
- cont);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMipsCmpD, lhs, rhs, cont);
}
@@ -628,12 +994,52 @@
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, opcode)) {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
- cont);
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
} else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
- VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
- cont);
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
} else {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
cont);
@@ -679,26 +1085,35 @@
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
- cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
- Node* const result = node->FindProjection(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
if (!result || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
@@ -724,11 +1139,10 @@
// Continuation could not be combined with a compare, emit compare against 0.
MipsOperandGenerator g(selector);
InstructionCode const opcode = cont->Encode(kMipsCmp);
- InstructionOperand* const value_operand = g.UseRegister(value);
+ InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
- selector->Emit(opcode, nullptr, value_operand, g.TempImmediate(0),
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -743,6 +1157,34 @@
}
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ MipsOperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 9 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kMipsSub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
@@ -778,7 +1220,7 @@
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMipsAddOvf, &cont);
}
@@ -788,7 +1230,7 @@
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMipsSubOvf, &cont);
}
@@ -797,33 +1239,96 @@
}
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
- MachineOperatorBuilder::kFloat64RoundTruncate;
+ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
+ flags |= MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
}
- return MachineOperatorBuilder::kNoFlags;
+ return flags | MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
}
} // namespace compiler
diff --git a/src/compiler/mips/linkage-mips.cc b/src/compiler/mips/linkage-mips.cc
deleted file mode 100644
index 2b314a2..0000000
--- a/src/compiler/mips/linkage-mips.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct MipsLinkageHelperTraits {
- static Register ReturnValueReg() { return v0; }
- static Register ReturnValue2Reg() { return v1; }
- static Register JSCallFunctionReg() { return a1; }
- static Register ContextReg() { return cp; }
- static Register RuntimeCallFunctionReg() { return a1; }
- static Register RuntimeCallArgCountReg() { return a0; }
- static RegList CCalleeSaveRegisters() {
- return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
- s6.bit() | s7.bit();
- }
- static Register CRegisterParameter(int i) {
- static Register register_parameters[] = {a0, a1, a2, a3};
- return register_parameters[i];
- }
- static int CRegisterParametersLength() { return 4; }
-};
-
-
-typedef LinkageHelper<MipsLinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/src/compiler/mips64/OWNERS b/src/compiler/mips64/OWNERS
index 5508ba6..89455a4 100644
--- a/src/compiler/mips64/OWNERS
+++ b/src/compiler/mips64/OWNERS
@@ -3,3 +3,4 @@
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index dee7705..1b81aa5 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/osr.h"
#include "src/mips/macro-assembler-mips.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -34,16 +34,16 @@
// Adds Mips-specific methods to convert InstructionOperands.
-class MipsOperandConverter FINAL : public InstructionOperandConverter {
+class MipsOperandConverter final : public InstructionOperandConverter {
public:
MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- FloatRegister OutputSingleRegister(int index = 0) {
+ FloatRegister OutputSingleRegister(size_t index = 0) {
return ToSingleRegister(instr_->OutputAt(index));
}
- FloatRegister InputSingleRegister(int index) {
+ FloatRegister InputSingleRegister(size_t index) {
return ToSingleRegister(instr_->InputAt(index));
}
@@ -53,7 +53,19 @@
return ToDoubleRegister(op);
}
- Operand InputImmediate(int index) {
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
+ Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
case Constant::kInt32:
@@ -79,7 +91,7 @@
return Operand(zero_reg);
}
- Operand InputOperand(int index) {
+ Operand InputOperand(size_t index) {
InstructionOperand* op = instr_->InputAt(index);
if (op->IsRegister()) {
return Operand(ToRegister(op));
@@ -87,8 +99,8 @@
return InputImmediate(index);
}
- MemOperand MemoryOperand(int* first_index) {
- const int index = *first_index;
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
break;
@@ -103,33 +115,31 @@
return MemOperand(no_reg);
}
- MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }
+ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
MemOperand ToMemOperand(InstructionOperand* op) const {
- DCHECK(op != NULL);
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
-static inline bool HasRegisterInput(Instruction* instr, int index) {
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsRegister();
}
namespace {
-class OutOfLineLoadSingle FINAL : public OutOfLineCode {
+class OutOfLineLoadSingle final : public OutOfLineCode {
public:
OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
__ Move(result_, std::numeric_limits<float>::quiet_NaN());
}
@@ -138,12 +148,12 @@
};
-class OutOfLineLoadDouble FINAL : public OutOfLineCode {
+class OutOfLineLoadDouble final : public OutOfLineCode {
public:
OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
__ Move(result_, std::numeric_limits<double>::quiet_NaN());
}
@@ -152,12 +162,12 @@
};
-class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+class OutOfLineLoadInteger final : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL { __ mov(result_, zero_reg); }
+ void Generate() final { __ mov(result_, zero_reg); }
private:
Register const result_;
@@ -169,7 +179,7 @@
OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
// Handle rounding to zero case where sign has to be preserved.
// High bits of double input already in kScratchReg.
__ dsrl(at, kScratchReg, 31);
@@ -182,27 +192,160 @@
};
-class OutOfLineTruncate FINAL : public OutOfLineRound {
+class OutOfLineRound32 : public OutOfLineCode {
public:
- OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
+ OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final {
+ // Handle rounding to zero case where sign has to be preserved.
+ // High bits of float input already in kScratchReg.
+ __ srl(at, kScratchReg, 31);
+ __ sll(at, at, 31);
+ __ mtc1(at, result_);
+ }
+
+ private:
+ DoubleRegister const result_;
};
-class OutOfLineFloor FINAL : public OutOfLineRound {
+class OutOfLineRecordWrite final : public OutOfLineCode {
public:
- OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+ // and restore lr properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ Daddu(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
};
-class OutOfLineCeil FINAL : public OutOfLineRound {
- public:
- OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
-};
+Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
+ switch (condition) {
+ case kNotEqual:
+ return ne;
+ case kEqual:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+
+Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
+ switch (condition) {
+ case kOverflow:
+ return ne;
+ case kNotOverflow:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+
+FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
+ FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ predicate = true;
+ return EQ;
+ case kNotEqual:
+ predicate = false;
+ return EQ;
+ case kUnsignedLessThan:
+ predicate = true;
+ return OLT;
+ case kUnsignedGreaterThanOrEqual:
+ predicate = false;
+ return ULT;
+ case kUnsignedLessThanOrEqual:
+ predicate = true;
+ return OLE;
+ case kUnsignedGreaterThan:
+ predicate = false;
+ return ULE;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ predicate = true;
+ break;
+ default:
+ predicate = true;
+ break;
+ }
+ UNREACHABLE();
+ return kNoFPUCondition;
+}
+
} // namespace
@@ -213,10 +356,10 @@
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Daddu(at, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(at, 0)); \
+ __ Daddu(kScratchReg, i.InputRegister(2), offset); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
- auto offset = i.InputOperand(0).immediate(); \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
__ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
__ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
} \
@@ -231,10 +374,10 @@
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Daddu(at, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(at, 0)); \
+ __ Daddu(kScratchReg, i.InputRegister(2), offset); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
- auto offset = i.InputOperand(0).immediate(); \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
__ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
__ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
} \
@@ -249,10 +392,10 @@
auto offset = i.InputRegister(0); \
auto value = i.Input##width##Register(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Daddu(at, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(at, 0)); \
+ __ Daddu(kScratchReg, i.InputRegister(3), offset); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
- auto offset = i.InputOperand(0).immediate(); \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
auto value = i.Input##width##Register(2); \
__ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
__ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
@@ -268,10 +411,10 @@
auto offset = i.InputRegister(0); \
auto value = i.InputRegister(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Daddu(at, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(at, 0)); \
+ __ Daddu(kScratchReg, i.InputRegister(3), offset); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
- auto offset = i.InputOperand(0).immediate(); \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
auto value = i.InputRegister(2); \
__ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
__ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
@@ -280,10 +423,15 @@
} while (0)
-#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
- do { \
- auto ool = \
- new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
+ if (kArchVariant == kMips64r6) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister()); \
Label done; \
__ mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
__ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
@@ -291,13 +439,61 @@
__ Branch(USE_DELAY_SLOT, &done, hs, at, \
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
__ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
__ dmfc1(at, i.OutputDoubleRegister()); \
__ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
__ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
__ bind(ool->exit()); \
__ bind(&done); \
- } while (0)
+ }
+
+#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode) \
+ if (kArchVariant == kMips64r6) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ int32_t kFloat32ExponentBias = 127; \
+ int32_t kFloat32MantissaBits = 23; \
+ int32_t kFloat32ExponentBits = 8; \
+ auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
+ Label done; \
+ __ mfc1(kScratchReg, i.InputDoubleRegister(0)); \
+ __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits); \
+ __ Branch(USE_DELAY_SLOT, &done, hs, at, \
+ Operand(kFloat32ExponentBias + kFloat32MantissaBits)); \
+ __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mfc1(at, i.OutputDoubleRegister()); \
+ __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
+ __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
+ __ bind(ool->exit()); \
+ __ bind(&done); \
+ }
+
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ daddiu(sp, sp, sp_slot_delta * kPointerSize);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Dsubu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
// Assembles an instruction after register allocation, producing machine code.
@@ -315,7 +511,21 @@
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(at);
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallCodeObject: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ } else {
+ __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(at);
+ }
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -326,39 +536,123 @@
__ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
-
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+ }
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Jump(at);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ break;
+ }
case kArchRet:
AssembleReturn();
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), sp);
break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ Daddu(at, object, index);
+ __ sd(value, MemOperand(at));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kMips64Add:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Dadd:
__ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64DaddOvf:
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ break;
case kMips64Sub:
__ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Dsub:
__ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64DsubOvf:
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ break;
case kMips64Mul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -368,11 +662,24 @@
case kMips64MulHighU:
__ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64DMulHigh:
+ __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
case kMips64Div:
__ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (kArchVariant == kMips64r6) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMips64DivU:
__ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (kArchVariant == kMips64r6) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMips64Mod:
__ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -385,9 +692,19 @@
break;
case kMips64Ddiv:
__ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (kArchVariant == kMips64r6) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMips64DdivU:
__ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (kArchVariant == kMips64r6) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMips64Dmod:
__ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -395,58 +712,103 @@
case kMips64DmodU:
__ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- break;
case kMips64And:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Or:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64Nor:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK(i.InputOperand(1).immediate() == 0);
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
case kMips64Xor:
__ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64Clz:
+ __ Clz(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kMips64Dclz:
+ __ dclz(i.OutputRegister(), i.InputRegister(0));
+ break;
case kMips64Shl:
if (instr->InputAt(1)->IsRegister()) {
__ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
- int32_t imm = i.InputOperand(1).immediate();
- __ sll(i.OutputRegister(), i.InputRegister(0), imm);
+ int64_t imm = i.InputOperand(1).immediate();
+ __ sll(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
}
break;
case kMips64Shr:
if (instr->InputAt(1)->IsRegister()) {
__ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
- int32_t imm = i.InputOperand(1).immediate();
- __ srl(i.OutputRegister(), i.InputRegister(0), imm);
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srl(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
}
break;
case kMips64Sar:
if (instr->InputAt(1)->IsRegister()) {
__ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
- int32_t imm = i.InputOperand(1).immediate();
- __ sra(i.OutputRegister(), i.InputRegister(0), imm);
+ int64_t imm = i.InputOperand(1).immediate();
+ __ sra(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
}
break;
case kMips64Ext:
__ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
break;
- case kMips64Dext:
- __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
- i.InputInt8(2));
+ case kMips64Ins:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
+ } else {
+ __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
+ break;
+ case kMips64Dext: {
+ int16_t pos = i.InputInt8(1);
+ int16_t size = i.InputInt8(2);
+ if (size > 0 && size <= 32 && pos >= 0 && pos < 32) {
+ __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ } else if (size > 32 && size <= 64 && pos > 0 && pos < 32) {
+ __ Dextm(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ } else {
+ DCHECK(size > 0 && size <= 32 && pos >= 32 && pos < 64);
+ __ Dextu(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
+ break;
+ }
+ case kMips64Dins:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
+ } else {
+ __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
break;
case kMips64Dshl:
if (instr->InputAt(1)->IsRegister()) {
__ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
- int32_t imm = i.InputOperand(1).immediate();
+ int64_t imm = i.InputOperand(1).immediate();
if (imm < 32) {
- __ dsll(i.OutputRegister(), i.InputRegister(0), imm);
+ __ dsll(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
} else {
- __ dsll32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+ __ dsll32(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm - 32));
}
}
break;
@@ -454,11 +816,13 @@
if (instr->InputAt(1)->IsRegister()) {
__ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
- int32_t imm = i.InputOperand(1).immediate();
+ int64_t imm = i.InputOperand(1).immediate();
if (imm < 32) {
- __ dsrl(i.OutputRegister(), i.InputRegister(0), imm);
+ __ dsrl(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
} else {
- __ dsrl32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+ __ dsrl32(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm - 32));
}
}
break;
@@ -466,7 +830,7 @@
if (instr->InputAt(1)->IsRegister()) {
__ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
- int32_t imm = i.InputOperand(1).immediate();
+ int64_t imm = i.InputOperand(1).immediate();
if (imm < 32) {
__ dsra(i.OutputRegister(), i.InputRegister(0), imm);
} else {
@@ -481,11 +845,9 @@
__ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Tst:
- case kMips64Tst32:
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
case kMips64Cmp:
- case kMips64Cmp32:
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
case kMips64Mov:
@@ -498,6 +860,56 @@
}
break;
+ case kMips64CmpS:
+ // Psuedo-instruction used for FP cmp/branch. No opcode emitted here.
+ break;
+ case kMips64AddS:
+ // TODO(plind): add special case: combine mult & add.
+ __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64SubS:
+ __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64MulS:
+ // TODO(plind): add special case: right op is -1.0, see arm port.
+ __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64DivS:
+ __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64ModS: {
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ FrameScope scope(masm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2, kScratchReg);
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ // Move the result in the double result register.
+ __ MovFromFloatResult(i.OutputSingleRegister());
+ break;
+ }
+ case kMips64AbsS:
+ __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kMips64SqrtS: {
+ __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kMips64MaxS:
+ __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64MinS:
+ __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kMips64CmpD:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
@@ -532,39 +944,167 @@
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
- case kMips64Float64Floor: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+ case kMips64AbsD:
+ __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- }
- case kMips64Float64Ceil: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
- break;
- }
- case kMips64Float64RoundTruncate: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
- break;
- }
case kMips64SqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kMips64CvtSD: {
+ case kMips64MaxD:
+ __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64MinD:
+ __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64Float64RoundDown: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
+ break;
+ }
+ case kMips64Float32RoundDown: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
+ break;
+ }
+ case kMips64Float64RoundTruncate: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
+ break;
+ }
+ case kMips64Float32RoundTruncate: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
+ break;
+ }
+ case kMips64Float64RoundUp: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
+ break;
+ }
+ case kMips64Float32RoundUp: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
+ break;
+ }
+ case kMips64Float64RoundTiesEven: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
+ break;
+ }
+ case kMips64Float32RoundTiesEven: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
+ break;
+ }
+ case kMips64Float64Max: {
+ // (b < a) ? a : b
+ if (kArchVariant == kMips64r6) {
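+        // On r6, cmp_d writes an all-ones/all-zeros predicate mask into the
+        // destination register and sel_d then picks an input by its LSB.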
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ // Left operand is result, passthrough if false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMips64Float64Min: {
+ // (a < b) ? a : b
+ if (kArchVariant == kMips64r6) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ // Right operand is result, passthrough if false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMips64Float32Max: {
+ // (b < a) ? a : b
+ if (kArchVariant == kMips64r6) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ // Left operand is result, passthrough if false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMips64Float32Min: {
+ // (a < b) ? a : b
+ if (kArchVariant == kMips64r6) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ // Right operand is result, passthrough if false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMips64CvtSD:
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
- }
- case kMips64CvtDS: {
+ case kMips64CvtDS:
__ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
break;
- }
case kMips64CvtDW: {
FPURegister scratch = kScratchDoubleReg;
__ mtc1(i.InputRegister(0), scratch);
__ cvt_d_w(i.OutputDoubleRegister(), scratch);
break;
}
- case kMips64CvtDUw: {
+ case kMips64CvtSW: {
FPURegister scratch = kScratchDoubleReg;
- __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
+ __ mtc1(i.InputRegister(0), scratch);
+ __ cvt_s_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kMips64CvtSL: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ dmtc1(i.InputRegister(0), scratch);
+ __ cvt_s_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kMips64CvtDL: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ dmtc1(i.InputRegister(0), scratch);
+ __ cvt_d_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kMips64CvtDUw: {
+ __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kMips64CvtDUl: {
+ __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kMips64CvtSUl: {
+ __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kMips64FloorWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ floor_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64CeilWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64RoundWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
break;
}
case kMips64TruncWD: {
@@ -574,12 +1114,126 @@
__ mfc1(i.OutputRegister(), scratch);
break;
}
+ case kMips64FloorWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ floor_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64CeilWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64RoundWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64TruncWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ trunc_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64TruncLS: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register tmp_fcsr = kScratchReg;
+ Register result = kScratchReg2;
+
+ bool load_status = instr->OutputCount() > 1;
+ if (load_status) {
+ // Save FCSR.
+ __ cfc1(tmp_fcsr, FCSR);
+ // Clear FPU flags.
+ __ ctc1(zero_reg, FCSR);
+ }
+ // Other arches use round to zero here, so we follow.
+ __ trunc_l_s(scratch, i.InputDoubleRegister(0));
+ __ dmfc1(i.OutputRegister(), scratch);
+ if (load_status) {
+ __ cfc1(result, FCSR);
+ // Check for overflow and NaNs.
+ __ andi(result, result,
+ (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
+ __ Slt(result, zero_reg, result);
+ __ xori(result, result, 1);
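+        // result is now 1 when the conversion succeeded, 0 when the overflow
+        // or invalid-operation flags fired (NaN or out-of-range input).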
+ __ mov(i.OutputRegister(1), result);
+        // Restore FCSR.
+ __ ctc1(tmp_fcsr, FCSR);
+ }
+ break;
+ }
+ case kMips64TruncLD: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register tmp_fcsr = kScratchReg;
+ Register result = kScratchReg2;
+
+ bool load_status = instr->OutputCount() > 1;
+ if (load_status) {
+ // Save FCSR.
+ __ cfc1(tmp_fcsr, FCSR);
+ // Clear FPU flags.
+ __ ctc1(zero_reg, FCSR);
+ }
+ // Other arches use round to zero here, so we follow.
+ __ trunc_l_d(scratch, i.InputDoubleRegister(0));
+ __ dmfc1(i.OutputRegister(0), scratch);
+ if (load_status) {
+ __ cfc1(result, FCSR);
+ // Check for overflow and NaNs.
+ __ andi(result, result,
+ (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
+ __ Slt(result, zero_reg, result);
+ __ xori(result, result, 1);
+ __ mov(i.OutputRegister(1), result);
+        // Restore FCSR.
+ __ ctc1(tmp_fcsr, FCSR);
+ }
+ break;
+ }
case kMips64TruncUwD: {
FPURegister scratch = kScratchDoubleReg;
// TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
+ case kMips64TruncUlS: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ // TODO(plind): Fix wrong param order of Trunc_ul_s() macro-asm function.
+ __ Trunc_ul_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch,
+ result);
+ break;
+ }
+ case kMips64TruncUlD: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ // TODO(plind): Fix wrong param order of Trunc_ul_d() macro-asm function.
+ __ Trunc_ul_d(i.InputDoubleRegister(0), i.OutputRegister(0), scratch,
+ result);
+ break;
+ }
+ case kMips64BitcastDL:
+ __ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMips64BitcastLD:
+ __ dmtc1(i.InputRegister(0), i.OutputDoubleRegister());
+ break;
+ case kMips64Float64ExtractLowWord32:
+ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMips64Float64ExtractHighWord32:
+ __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMips64Float64InsertLowWord32:
+ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ case kMips64Float64InsertHighWord32:
+ __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
// ... more basic instructions ...
case kMips64Lbu:
@@ -617,7 +1271,7 @@
break;
}
case kMips64Swc1: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ swc1(i.InputSingleRegister(index), operand);
break;
@@ -629,28 +1283,26 @@
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMips64Push:
- __ Push(i.InputRegister(0));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
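+        // Store the double just below sp, then move sp down past it.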
+ __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Subu(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ __ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
break;
case kMips64StackClaim: {
- int words = MiscField::decode(instr->opcode());
- __ Dsubu(sp, sp, Operand(words << kPointerSizeLog2));
+ __ Dsubu(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
break;
}
case kMips64StoreToStackSlot: {
- int slot = MiscField::decode(instr->opcode());
- __ sd(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
- break;
- }
- case kMips64StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ daddu(index, object, index);
- __ sd(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RAStatus ra_status = kRAHasNotBeenSaved;
- __ RecordWrite(object, index, value, ra_status, mode);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ __ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
break;
}
case kCheckedLoadInt8:
@@ -668,6 +1320,9 @@
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
break;
+ case kCheckedLoadWord64:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(ld);
+ break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
break;
@@ -683,6 +1338,9 @@
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(sw);
break;
+ case kCheckedStoreWord64:
+ ASSEMBLE_CHECKED_STORE_INTEGER(sd);
+ break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
break;
@@ -690,7 +1348,7 @@
ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
break;
}
-}
+} // NOLINT(readability/fn_size)
#define UNSUPPORTED_COND(opcode, condition) \
@@ -698,225 +1356,119 @@
out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
UNIMPLEMENTED();
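+
+// Maps TurboFan flags conditions to MIPS branch conditions for the FP
+// compare/branch cases (kMips64CmpS/CmpD) below; returns false when the
+// condition has no direct FP branch encoding.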
+static bool convertCondition(FlagsCondition condition, Condition& cc) {
+ switch (condition) {
+ case kEqual:
+ cc = eq;
+ return true;
+ case kNotEqual:
+ cc = ne;
+ return true;
+ case kUnsignedLessThan:
+ cc = lt;
+ return true;
+ case kUnsignedGreaterThanOrEqual:
+ cc = uge;
+ return true;
+ case kUnsignedLessThanOrEqual:
+ cc = le;
+ return true;
+ case kUnsignedGreaterThan:
+ cc = ugt;
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
MipsOperandConverter i(this, instr);
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
Condition cc = kNoCondition;
-
// MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on the other archs. The compare operations
  // emit mips pseudo-instructions, which are handled here by branch
// instructions that do the actual comparison. Essential that the input
- // registers to compare psuedo-op are not modified before this branch op, as
+ // registers to compare pseudo-op are not modified before this branch op, as
// they are tested here.
- // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
- // not separated by other instructions.
if (instr->arch_opcode() == kMips64Tst) {
- switch (branch->condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMips64Tst, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionTst(branch->condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
- } else if (instr->arch_opcode() == kMips64Tst32) {
- switch (branch->condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMips64Tst32, branch->condition);
- break;
- }
- // Zero-extend registers on MIPS64 only 64-bit operand
- // branch and compare op. is available.
- // This is a disadvantage to perform 32-bit operation on MIPS64.
- // Try to force globally in front-end Word64 representation to be preferred
- // for MIPS64 even for Word32.
- __ And(at, i.InputRegister(0), i.InputOperand(1));
- __ Dext(at, at, 0, 32);
- __ Branch(tlabel, cc, at, Operand(zero_reg));
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
- switch (branch->condition) {
- case kOverflow:
- cc = ne;
- break;
- case kNotOverflow:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMips64Dadd, branch->condition);
- break;
- }
-
+ cc = FlagsConditionToConditionOvf(branch->condition);
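+    // The 32-bit operation is done as a 64-bit add/sub; it overflowed iff
+    // the upper word (dsra32) differs from the sign-extension of bit 31
+    // (sra), i.e. the result no longer fits in a sign-extended 32-bit value.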
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg));
+ } else if (instr->arch_opcode() == kMips64DaddOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64DaddOvf, branch->condition);
+ break;
+ }
+ } else if (instr->arch_opcode() == kMips64DsubOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64DsubOvf, branch->condition);
+ break;
+ }
} else if (instr->arch_opcode() == kMips64Cmp) {
- switch (branch->condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
-
- } else if (instr->arch_opcode() == kMips64Cmp32) {
- switch (branch->condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp32, branch->condition);
- break;
+ } else if (instr->arch_opcode() == kMips64CmpS) {
+ if (!convertCondition(branch->condition, cc)) {
+ UNSUPPORTED_COND(kMips64CmpS, branch->condition);
}
-
- switch (branch->condition) {
- case kEqual:
- case kNotEqual:
- case kSignedLessThan:
- case kSignedGreaterThanOrEqual:
- case kSignedLessThanOrEqual:
- case kSignedGreaterThan:
- // Sign-extend registers on MIPS64 only 64-bit operand
- // branch and compare op. is available.
- __ sll(i.InputRegister(0), i.InputRegister(0), 0);
- if (instr->InputAt(1)->IsRegister()) {
- __ sll(i.InputRegister(1), i.InputRegister(1), 0);
- }
- break;
- case kUnsignedLessThan:
- case kUnsignedGreaterThanOrEqual:
- case kUnsignedLessThanOrEqual:
- case kUnsignedGreaterThan:
- // Zero-extend registers on MIPS64 only 64-bit operand
- // branch and compare op. is available.
- __ Dext(i.InputRegister(0), i.InputRegister(0), 0, 32);
- if (instr->InputAt(1)->IsRegister()) {
- __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
- }
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp, branch->condition);
- break;
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
}
- __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
+ __ BranchF32(tlabel, nullptr, cc, left, right);
} else if (instr->arch_opcode() == kMips64CmpD) {
- // TODO(dusmil) optimize unordered checks to use less instructions
- // even if we have to unfold BranchF macro.
- Label* nan = flabel;
- switch (branch->condition) {
- case kUnorderedEqual:
- cc = eq;
- break;
- case kUnorderedNotEqual:
- cc = ne;
- nan = tlabel;
- break;
- case kUnorderedLessThan:
- cc = lt;
- break;
- case kUnorderedGreaterThanOrEqual:
- cc = ge;
- nan = tlabel;
- break;
- case kUnorderedLessThanOrEqual:
- cc = le;
- break;
- case kUnorderedGreaterThan:
- cc = gt;
- nan = tlabel;
- break;
- default:
- UNSUPPORTED_COND(kMips64CmpD, branch->condition);
- break;
+ if (!convertCondition(branch->condition, cc)) {
+ UNSUPPORTED_COND(kMips64CmpD, branch->condition);
}
- __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF64(tlabel, nullptr, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
UNIMPLEMENTED();
}
+ if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}
@@ -930,325 +1482,302 @@
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label false_value;
- DCHECK_NE(0, instr->OutputCount());
+ DCHECK_NE(0u, instr->OutputCount());
Register result = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
-
// MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on the other archs. The compare operations
// emit mips pseudo-instructions, which are checked and handled here.
- // For materializations, we use delay slot to set the result true, and
- // in the false case, where we fall through the branch, we reset the result
- // false.
-
if (instr->arch_opcode() == kMips64Tst) {
- switch (condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMips64Tst, condition);
- break;
+ cc = FlagsConditionToConditionTst(condition);
+ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
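+    // Sltu(result, zero_reg, x) materializes (x != 0) as 1 or 0.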
+ __ Sltu(result, zero_reg, kScratchReg);
+ if (cc == eq) {
+ // Sltu produces 0 for equality, invert the result.
+ __ xori(result, result, 1);
}
- __ And(at, i.InputRegister(0), i.InputOperand(1));
- __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
- __ li(result, Operand(1)); // In delay slot.
- } else if (instr->arch_opcode() == kMips64Tst32) {
- switch (condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMips64Tst, condition);
- break;
- }
- // Zero-extend register on MIPS64 only 64-bit operand
- // branch and compare op. is available.
- __ And(at, i.InputRegister(0), i.InputOperand(1));
- __ Dext(at, at, 0, 32);
- __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
- __ li(result, Operand(1)); // In delay slot.
+ return;
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
- switch (condition) {
- case kOverflow:
- cc = ne;
+ cc = FlagsConditionToConditionOvf(condition);
+    // The overflow check produces 1 or 0 in the result register.
+ __ dsrl32(kScratchReg, i.OutputRegister(), 31);
+ __ srl(at, i.OutputRegister(), 31);
+ __ xor_(result, kScratchReg, at);
+ if (cc == eq) // Toggle result for not overflow.
+ __ xori(result, result, 1);
+ return;
+ } else if (instr->arch_opcode() == kMips64DaddOvf ||
+ instr->arch_opcode() == kMips64DsubOvf) {
+ Label flabel, tlabel;
+ switch (instr->arch_opcode()) {
+ case kMips64DaddOvf:
+ __ DaddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+
break;
- case kNotOverflow:
- cc = eq;
+ case kMips64DsubOvf:
+ __ DsubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
break;
default:
- UNSUPPORTED_COND(kMips64DAdd, condition);
+ UNREACHABLE();
break;
}
- __ dsra32(kScratchReg, i.OutputRegister(), 0);
- __ sra(at, i.OutputRegister(), 31);
- __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(kScratchReg));
- __ li(result, Operand(1)); // In delay slot.
+ __ li(result, 1);
+ __ Branch(&tlabel);
+ __ bind(&flabel);
+ __ li(result, 0);
+ __ bind(&tlabel);
} else if (instr->arch_opcode() == kMips64Cmp) {
- Register left = i.InputRegister(0);
- Operand right = i.InputOperand(1);
- switch (condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp, condition);
- break;
- }
- __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
- __ li(result, Operand(1)); // In delay slot.
- } else if (instr->arch_opcode() == kMips64Cmp32) {
- Register left = i.InputRegister(0);
- Operand right = i.InputOperand(1);
- switch (condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp, condition);
- break;
- }
-
- switch (condition) {
- case kEqual:
- case kNotEqual:
- case kSignedLessThan:
- case kSignedGreaterThanOrEqual:
- case kSignedLessThanOrEqual:
- case kSignedGreaterThan:
- // Sign-extend registers on MIPS64 only 64-bit operand
- // branch and compare op. is available.
- __ sll(left, left, 0);
- if (instr->InputAt(1)->IsRegister()) {
- __ sll(i.InputRegister(1), i.InputRegister(1), 0);
+ cc = FlagsConditionToConditionCmp(condition);
+ switch (cc) {
+ case eq:
+ case ne: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ Register select;
+ if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
+ // Pass left operand if right is zero.
+ select = left;
+ } else {
+ __ Dsubu(kScratchReg, left, right);
+ select = kScratchReg;
}
- break;
- case kUnsignedLessThan:
- case kUnsignedGreaterThanOrEqual:
- case kUnsignedLessThanOrEqual:
- case kUnsignedGreaterThan:
- // Zero-extend registers on MIPS64 only 64-bit operand
- // branch and compare op. is available.
- __ Dext(left, left, 0, 32);
- if (instr->InputAt(1)->IsRegister()) {
- __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
+ __ Sltu(result, zero_reg, select);
+ if (cc == eq) {
+ // Sltu produces 0 for equality, invert the result.
+ __ xori(result, result, 1);
}
- break;
+ } break;
+ case lt:
+ case ge: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Slt(result, left, right);
+ if (cc == ge) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case gt:
+ case le: {
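+        // Slt only computes "less than", so gt/le swap the operands:
+        // (a > b) == (b < a). The unsigned hi/ls cases below do the same.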
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Slt(result, left, right);
+ if (cc == le) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case lo:
+ case hs: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Sltu(result, left, right);
+ if (cc == hs) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case hi:
+ case ls: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Sltu(result, left, right);
+ if (cc == ls) {
+ __ xori(result, result, 1);
+ }
+ } break;
default:
- UNSUPPORTED_COND(kMips64Cmp32, condition);
- break;
+ UNREACHABLE();
}
- __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
- __ li(result, Operand(1)); // In delay slot.
- } else if (instr->arch_opcode() == kMips64CmpD) {
- FPURegister left = i.InputDoubleRegister(0);
- FPURegister right = i.InputDoubleRegister(1);
- // TODO(plind): Provide NaN-testing macro-asm function without need for
- // BranchF.
- FPURegister dummy1 = f0;
- FPURegister dummy2 = f2;
- switch (condition) {
- case kUnorderedEqual:
- // TODO(plind): improve the NaN testing throughout this function.
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = eq;
- break;
- case kUnorderedNotEqual:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = ne;
- break;
- case kUnorderedLessThan:
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = lt;
- break;
- case kUnorderedGreaterThanOrEqual:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = ge;
- break;
- case kUnorderedLessThanOrEqual:
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = le;
- break;
- case kUnorderedGreaterThan:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = gt;
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp, condition);
- break;
+ return;
+ } else if (instr->arch_opcode() == kMips64CmpD ||
+ instr->arch_opcode() == kMips64CmpS) {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
}
- __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
- __ li(result, Operand(1)); // In delay slot - branch taken returns 1.
- // Fall-thru (branch not taken) returns 0.
+ bool predicate;
+ FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
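+    // 'predicate' indicates whether a set FPU condition flag means the
+    // materialized result is true; when false, the result is inverted below.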
+ if (kArchVariant != kMips64r6) {
+ __ li(result, Operand(1));
+ if (instr->arch_opcode() == kMips64CmpD) {
+ __ c(cc, D, left, right);
+ } else {
+ DCHECK(instr->arch_opcode() == kMips64CmpS);
+ __ c(cc, S, left, right);
+ }
+ if (predicate) {
+ __ Movf(result, zero_reg);
+ } else {
+ __ Movt(result, zero_reg);
+ }
+ } else {
+ if (instr->arch_opcode() == kMips64CmpD) {
+ __ cmp(cc, L, kDoubleCompareReg, left, right);
+ } else {
+ DCHECK(instr->arch_opcode() == kMips64CmpS);
+ __ cmp(cc, W, kDoubleCompareReg, left, right);
+ }
+ __ dmfc1(result, kDoubleCompareReg);
+ __ andi(result, result, 1); // Cmp returns all 1's/0's, use only LSB.
+      if (!predicate) // Toggle the result when the predicate is false.
+ __ xori(result, result, 1);
+ }
+ return;
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
instr->arch_opcode());
TRACE_UNIMPL();
UNIMPLEMENTED();
}
- // Fallthru case is the false materialization.
- __ bind(&false_value);
- __ li(result, Operand(static_cast<int64_t>(0)));
- __ bind(&done);
}
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ li(at, Operand(i.InputInt32(index + 0)));
+ __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
+ }
+ __ nop(); // Branch delay slot of the last beq.
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+ Label here;
+
+ __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
+ __ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
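+  // case_count * 2 covers the two-slot (8-byte) dd table entries; the extra
+  // 7 leaves room for the six instructions emitted below plus a possible
+  // alignment nop.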
+  // Ensure that dd-ed labels use 8-byte aligned addresses.
+ __ Align(8);
+ __ bal(&here);
+ __ dsll(at, input, 3); // Branch delay slot.
+ __ bind(&here);
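+  // bal sets ra = pc + 8, i.e. the daddu below, where 'here' is also bound;
+  // the table of 8-byte entries starts 4 instructions later (daddu, ld, jr,
+  // nop), hence the 4 * kInstrSize offset.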
+ __ daddu(at, at, ra);
+ __ ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+ __ jr(at);
+ __ nop(); // Branch delay slot nop.
+ for (size_t index = 0; index < case_count; ++index) {
+ __ dd(GetLabel(i.InputRpo(index + 2)));
+ }
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
__ mov(fp, sp);
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- // TODO(plind): make callee save size const, possibly DCHECK it.
- int register_save_area_size = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- register_save_area_size += kPointerSize;
- }
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
- __ MultiPush(saves);
- }
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
- if (info->strict_mode() == SLOPPY && !info->is_native()) {
- Label ok;
- // +2 for return address and saved frame pointer.
- int receiver_slot = info->scope()->num_parameters() + 2;
- __ ld(a2, MemOperand(fp, receiver_slot * kPointerSize));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&ok, ne, a2, Operand(at));
-
- __ ld(a2, GlobalObjectOperand());
- __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
- __ sd(a2, MemOperand(fp, receiver_slot * kPointerSize));
- __ bind(&ok);
- }
- } else {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(0);
}
- int stack_slots = frame()->GetSpillSlotCount();
- if (stack_slots > 0) {
- __ Dsubu(sp, sp, Operand(stack_slots * kPointerSize));
+ frame_access_state()->SetFrameAccessToDefault();
+
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ if (stack_shrink_slots > 0) {
+ __ Dsubu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ }
+
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(saves_fpu);
+ int count = base::bits::CountPopulation32(saves_fpu);
+ DCHECK(kNumCalleeSavedFPU == count);
+ frame()->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kPointerSize));
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ // kNumCalleeSaved includes the fp register, but the fp register
+ // is saved separately in TF.
+ int count = base::bits::CountPopulation32(saves);
+ DCHECK(kNumCalleeSaved == count + 1);
+ frame()->AllocateSavedCalleeRegisterSlots(count);
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
- if (stack_slots > 0) {
- __ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
- }
- // Restore registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) {
- __ MultiPop(saves);
- }
+
+ // Restore GP registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore FPU registers.
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ __ MultiPopFPU(saves_fpu);
+ }
+
+ if (descriptor->IsCFunctionCall()) {
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+ } else if (frame()->needs_frame()) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
}
- __ mov(sp, fp);
- __ Pop(ra, fp);
- __ Ret();
- } else {
- __ mov(sp, fp);
- __ Pop(ra, fp);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
+ }
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ if (pop_count != 0) {
__ DropAndRet(pop_count);
+ } else {
+ __ Ret();
}
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1290,9 +1819,19 @@
case Constant::kExternalReference:
__ li(dst, Operand(src.ToExternalReference()));
break;
- case Constant::kHeapObject:
- __ li(dst, src.ToHeapObject());
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
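+        // Prefer rematerializing known objects from the frame or the root
+        // list over embedding the handle in the code stream.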
+ Heap::RootListIndex index;
+ int offset;
+ if (IsMaterializableFromFrame(src_object, &offset)) {
+ __ ld(dst, MemOperand(fp, offset));
+ } else if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ li(dst, src_object);
+ }
break;
+ }
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
break;
@@ -1344,7 +1883,7 @@
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1393,9 +1932,9 @@
Register temp_0 = kScratchReg;
FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
- MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
+ MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst0 = g.ToMemOperand(destination);
- MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
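+    // The double slot is copied as two 32-bit words, so the second word is
+    // at offset + kIntSize (4) rather than + kPointerSize (8).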
__ ldc1(temp_1, dst0); // Save destination in temp_1.
__ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
__ sw(temp_0, dst0);
@@ -1409,6 +1948,12 @@
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 64-bit MIPS we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() {
  // Unused on 32-bit ARM. Still exists on 64-bit ARM.
// TODO(plind): Unclear when this is called now. Understand, fix if needed.
@@ -1417,24 +1962,25 @@
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- // Block tramoline pool emission for duration of padding.
- v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= v8::internal::Assembler::kInstrSize;
- }
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+    // Block trampoline pool emission for the duration of the padding.
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
}
}
- MarkLazyDeoptSite();
}
#undef __
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
index dd019f9..778c6ad 100644
--- a/src/compiler/mips64/instruction-codes-mips64.h
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -11,75 +11,124 @@
// MIPS64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Mips64Add) \
- V(Mips64Dadd) \
- V(Mips64Sub) \
- V(Mips64Dsub) \
- V(Mips64Mul) \
- V(Mips64MulHigh) \
- V(Mips64MulHighU) \
- V(Mips64Dmul) \
- V(Mips64Div) \
- V(Mips64Ddiv) \
- V(Mips64DivU) \
- V(Mips64DdivU) \
- V(Mips64Mod) \
- V(Mips64Dmod) \
- V(Mips64ModU) \
- V(Mips64DmodU) \
- V(Mips64And) \
- V(Mips64Or) \
- V(Mips64Xor) \
- V(Mips64Shl) \
- V(Mips64Shr) \
- V(Mips64Sar) \
- V(Mips64Ext) \
- V(Mips64Dext) \
- V(Mips64Dshl) \
- V(Mips64Dshr) \
- V(Mips64Dsar) \
- V(Mips64Ror) \
- V(Mips64Dror) \
- V(Mips64Mov) \
- V(Mips64Tst) \
- V(Mips64Tst32) \
- V(Mips64Cmp) \
- V(Mips64Cmp32) \
- V(Mips64CmpD) \
- V(Mips64AddD) \
- V(Mips64SubD) \
- V(Mips64MulD) \
- V(Mips64DivD) \
- V(Mips64ModD) \
- V(Mips64SqrtD) \
- V(Mips64Float64Floor) \
- V(Mips64Float64Ceil) \
- V(Mips64Float64RoundTruncate) \
- V(Mips64CvtSD) \
- V(Mips64CvtDS) \
- V(Mips64TruncWD) \
- V(Mips64TruncUwD) \
- V(Mips64CvtDW) \
- V(Mips64CvtDUw) \
- V(Mips64Lb) \
- V(Mips64Lbu) \
- V(Mips64Sb) \
- V(Mips64Lh) \
- V(Mips64Lhu) \
- V(Mips64Sh) \
- V(Mips64Ld) \
- V(Mips64Lw) \
- V(Mips64Sw) \
- V(Mips64Sd) \
- V(Mips64Lwc1) \
- V(Mips64Swc1) \
- V(Mips64Ldc1) \
- V(Mips64Sdc1) \
- V(Mips64Push) \
- V(Mips64StoreToStackSlot) \
- V(Mips64StackClaim) \
- V(Mips64StoreWriteBarrier)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Mips64Add) \
+ V(Mips64Dadd) \
+ V(Mips64DaddOvf) \
+ V(Mips64Sub) \
+ V(Mips64Dsub) \
+ V(Mips64DsubOvf) \
+ V(Mips64Mul) \
+ V(Mips64MulHigh) \
+ V(Mips64DMulHigh) \
+ V(Mips64MulHighU) \
+ V(Mips64Dmul) \
+ V(Mips64Div) \
+ V(Mips64Ddiv) \
+ V(Mips64DivU) \
+ V(Mips64DdivU) \
+ V(Mips64Mod) \
+ V(Mips64Dmod) \
+ V(Mips64ModU) \
+ V(Mips64DmodU) \
+ V(Mips64And) \
+ V(Mips64Or) \
+ V(Mips64Nor) \
+ V(Mips64Xor) \
+ V(Mips64Clz) \
+ V(Mips64Shl) \
+ V(Mips64Shr) \
+ V(Mips64Sar) \
+ V(Mips64Ext) \
+ V(Mips64Ins) \
+ V(Mips64Dext) \
+ V(Mips64Dins) \
+ V(Mips64Dclz) \
+ V(Mips64Dshl) \
+ V(Mips64Dshr) \
+ V(Mips64Dsar) \
+ V(Mips64Ror) \
+ V(Mips64Dror) \
+ V(Mips64Mov) \
+ V(Mips64Tst) \
+ V(Mips64Cmp) \
+ V(Mips64CmpS) \
+ V(Mips64AddS) \
+ V(Mips64SubS) \
+ V(Mips64MulS) \
+ V(Mips64DivS) \
+ V(Mips64ModS) \
+ V(Mips64AbsS) \
+ V(Mips64SqrtS) \
+ V(Mips64MaxS) \
+ V(Mips64MinS) \
+ V(Mips64CmpD) \
+ V(Mips64AddD) \
+ V(Mips64SubD) \
+ V(Mips64MulD) \
+ V(Mips64DivD) \
+ V(Mips64ModD) \
+ V(Mips64AbsD) \
+ V(Mips64SqrtD) \
+ V(Mips64MaxD) \
+ V(Mips64MinD) \
+ V(Mips64Float64RoundDown) \
+ V(Mips64Float64RoundTruncate) \
+ V(Mips64Float64RoundUp) \
+ V(Mips64Float64RoundTiesEven) \
+ V(Mips64Float32RoundDown) \
+ V(Mips64Float32RoundTruncate) \
+ V(Mips64Float32RoundUp) \
+ V(Mips64Float32RoundTiesEven) \
+ V(Mips64CvtSD) \
+ V(Mips64CvtDS) \
+ V(Mips64TruncWD) \
+ V(Mips64RoundWD) \
+ V(Mips64FloorWD) \
+ V(Mips64CeilWD) \
+ V(Mips64TruncWS) \
+ V(Mips64RoundWS) \
+ V(Mips64FloorWS) \
+ V(Mips64CeilWS) \
+ V(Mips64TruncLS) \
+ V(Mips64TruncLD) \
+ V(Mips64TruncUwD) \
+ V(Mips64TruncUlS) \
+ V(Mips64TruncUlD) \
+ V(Mips64CvtDW) \
+ V(Mips64CvtSL) \
+ V(Mips64CvtSW) \
+ V(Mips64CvtSUl) \
+ V(Mips64CvtDL) \
+ V(Mips64CvtDUw) \
+ V(Mips64CvtDUl) \
+ V(Mips64Lb) \
+ V(Mips64Lbu) \
+ V(Mips64Sb) \
+ V(Mips64Lh) \
+ V(Mips64Lhu) \
+ V(Mips64Sh) \
+ V(Mips64Ld) \
+ V(Mips64Lw) \
+ V(Mips64Sw) \
+ V(Mips64Sd) \
+ V(Mips64Lwc1) \
+ V(Mips64Swc1) \
+ V(Mips64Ldc1) \
+ V(Mips64Sdc1) \
+ V(Mips64BitcastDL) \
+ V(Mips64BitcastLD) \
+ V(Mips64Float64ExtractLowWord32) \
+ V(Mips64Float64ExtractHighWord32) \
+ V(Mips64Float64InsertLowWord32) \
+ V(Mips64Float64InsertHighWord32) \
+ V(Mips64Float64Max) \
+ V(Mips64Float64Min) \
+ V(Mips64Float32Max) \
+ V(Mips64Float32Min) \
+ V(Mips64Push) \
+ V(Mips64StoreToStackSlot) \
+ V(Mips64StackClaim)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/src/compiler/mips64/instruction-scheduler-mips64.cc b/src/compiler/mips64/instruction-scheduler-mips64.cc
new file mode 100644
index 0000000..af86a87
--- /dev/null
+++ b/src/compiler/mips64/instruction-scheduler-mips64.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNIMPLEMENTED();
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNIMPLEMENTED();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 35ad16b..1b12bd9 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -17,12 +19,12 @@
// Adds Mips-specific methods for generating InstructionOperands.
-class Mips64OperandGenerator FINAL : public OperandGenerator {
+class Mips64OperandGenerator final : public OperandGenerator {
public:
explicit Mips64OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
if (CanBeImmediate(node, opcode)) {
return UseImmediate(node);
}
@@ -56,35 +58,6 @@
}
}
-
- bool CanBeImmediate(Node* node, InstructionCode opcode,
- FlagsContinuation* cont) {
- int64_t value;
- if (node->opcode() == IrOpcode::kInt32Constant)
- value = OpParameter<int32_t>(node);
- else if (node->opcode() == IrOpcode::kInt64Constant)
- value = OpParameter<int64_t>(node);
- else
- return false;
- switch (ArchOpcodeField::decode(opcode)) {
- case kMips64Cmp32:
- switch (cont->condition()) {
- case kUnsignedLessThan:
- case kUnsignedGreaterThanOrEqual:
- case kUnsignedLessThanOrEqual:
- case kUnsignedGreaterThan:
- // Immediate operands for unsigned 32-bit compare operations
- // should not be sign-extended.
- return is_uint15(value);
- default:
- return false;
- }
- default:
- return is_int16(value);
- }
- }
-
-
private:
bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
TRACE_UNIMPL();
@@ -123,9 +96,9 @@
InstructionCode opcode, FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
inputs[input_count++] = g.UseRegister(m.left().node());
@@ -141,14 +114,13 @@
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -160,35 +132,34 @@
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kMips64Lwc1;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kMips64Ldc1;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeUint32 ? kMips64Lbu : kMips64Lb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
break;
- case kRepWord16:
- opcode = typ == kTypeUint32 ? kMips64Lhu : kMips64Lh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kMips64Lw;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kMips64Ld;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -197,7 +168,7 @@
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else {
- InstructionOperand* addr_reg = g.TempRegister();
+ InstructionOperand addr_reg = g.TempRegister();
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
@@ -213,67 +184,180 @@
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
- Emit(kMips64StoreWriteBarrier, NULL, g.UseFixed(base, t0),
- g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kMips64Swc1;
- break;
- case kRepFloat64:
- opcode = kMips64Sdc1;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kMips64Sb;
- break;
- case kRepWord16:
- opcode = kMips64Sh;
- break;
- case kRepWord32:
- opcode = kMips64Sw;
- break;
- case kRepTagged: // Fall through.
- case kRepWord64:
- opcode = kMips64Sd;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ // TODO(mips): I guess this could be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- InstructionOperand* addr_reg = g.TempRegister();
- Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
- g.TempImmediate(0), g.UseRegister(value));
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kMips64Swc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMips64Sdc1;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kMips64Sb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMips64Sh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kMips64Sw;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kMips64Sd;
+ break;
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
}
}
void InstructionSelector::VisitWord32And(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+ // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().Value() & 0x1f;
+
+ // Ext cannot extract bits past the register size, however since
+ // shifting the original value would have introduced some zeros we can
+ // still use Ext with a smaller mask and the remaining bits will be
+ // zeros.
+ if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+ Emit(kMips64Ext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t shift = base::bits::CountPopulation32(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+ if (shift != 0 && shift != 32 && msb + shift == 32) {
+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+ // and remove constant loading of inverted mask.
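+      // Example: mask 0xffffff00 gives shift == 8 and msb == 24, so
+      // inserting 8 zero bits at position 0 computes x & ~0xff.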
+ Emit(kMips64Ins, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
VisitBinop(this, node, kMips64And);
}
void InstructionSelector::VisitWord64And(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint64_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation64(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+
+ // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int64 shifts use `value % 64`.
+ uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
+
+        // Dext cannot extract bits past the register size; however, since
+        // shifting the original value would have introduced some zeros, we
+        // can still use Dext with a smaller mask and the remaining bits will
+        // be zeros.
+ if (lsb + mask_width > 64) mask_width = 64 - lsb;
+
+ Emit(kMips64Dext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(static_cast<int32_t>(mask_width)));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasValue()) {
+ uint64_t mask = m.right().Value();
+ uint32_t shift = base::bits::CountPopulation64(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros64(~mask);
+ if (shift != 0 && shift < 32 && msb + shift == 64) {
+      // Insert zeros for the (x >> K) << K => x & ~(2^K - 1) expression
+      // reduction, and avoid loading the inverted mask as a constant. Dins
+      // cannot insert bits past the word size, so only shifts smaller than
+      // 32 are handled here.
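+      // (Illustrative example: x & ~0xff becomes Dins of 8 zero bits at
+      // position 0.)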
+ Emit(kMips64Dins, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
VisitBinop(this, node, kMips64And);
}
@@ -289,21 +373,105 @@
void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasValue()) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+    // Use Nor for bit negation, which avoids loading a constant for xori.
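+    // (x ^ -1 is ~x, and nor(x, zero) computes exactly that.)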
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
VisitBinop(this, node, kMips64Xor);
}
void InstructionSelector::VisitWord64Xor(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasValue()) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+    // Use Nor for bit negation, which avoids loading a constant for xori.
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
VisitBinop(this, node, kMips64Xor);
}
void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
+    // contiguous and the shift immediate is non-zero.
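+    // (Illustrative example: (x & 0x3fffffff) << 2 reduces to x << 2, since
+    // the masked-off bits are shifted out anyway.)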
+ if (mleft.right().HasValue()) {
+ uint32_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kMips64Shl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kMips64Shl, node);
}
void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x1f;
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
+ // shifted into the least-significant bits.
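+      // (Illustrative example: (x & 0xff00) >> 8 selects Ext(x, 8, 8).)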
+ uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_msb + mask_width + lsb) == 32) {
+ Mips64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kMips64Ext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMips64Shr, node);
}
@@ -314,11 +482,67 @@
void InstructionSelector::VisitWord64Shl(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+ m.right().IsInRange(32, 63)) {
+ // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+ // 32 bits anyway.
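+    // (Illustrative example: Shl(ChangeInt32ToInt64(x), 32) keeps only the
+    // low 32 bits of x, so the extension is dead.)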
+ Emit(kMips64Dshl, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 63)) {
+    // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
+    // contiguous and the shift immediate is non-zero.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ uint64_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation64(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ uint64_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+ DCHECK_NE(0u, shift);
+
+ if ((shift + mask_width) >= 64) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kMips64Dshl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kMips64Dshl, node);
}
void InstructionSelector::VisitWord64Shr(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x3f;
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Select Dext for Shr(And(x, mask), imm) where the result of the mask is
+ // shifted into the least-significant bits.
+ uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation64(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_msb + mask_width + lsb) == 64) {
+ Mips64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
+ Emit(kMips64Dext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMips64Dshr, node);
}
@@ -333,11 +557,33 @@
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ VisitRR(this, kMips64Clz, node);
+}
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitWord64Ror(Node* node) {
VisitRRO(this, kMips64Dror, node);
}
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ VisitRR(this, kMips64Dclz, node);
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
Mips64OperandGenerator g(this);
// TODO(plind): Consider multiply & add optimization from arm port.
@@ -374,7 +620,7 @@
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
@@ -383,7 +629,7 @@
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value + 1)));
@@ -392,25 +638,32 @@
return;
}
}
- Emit(kMips64Mul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.UseRegister(m.right().node()));
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher leftInput(left), rightInput(right);
+ if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
+ // Combine untagging shifts with Dmul high.
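+        // (When both inputs are untagged smis, i.e. payload in the upper
+        // word and a zero low word, the upper 64 bits of the 64x64 product
+        // are exactly the product of the two payloads.)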
+ Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ VisitRRR(this, kMips64Mul, node);
}
void InstructionSelector::VisitInt32MulHigh(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64MulHigh, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ VisitRRR(this, kMips64MulHigh, node);
}
void InstructionSelector::VisitUint32MulHigh(Node* node) {
- Mips64OperandGenerator g(this);
- InstructionOperand* const dmul_operand = g.TempRegister();
- Emit(kMips64MulHighU, dmul_operand, g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
- Emit(kMips64Ext, g.DefineAsRegister(node), dmul_operand, g.TempImmediate(0),
- g.TempImmediate(32));
+ VisitRRR(this, kMips64MulHighU, node);
}
@@ -419,7 +672,7 @@
Int64BinopMatcher m(node);
// TODO(dusmil): Add optimization for shifts larger than 32.
if (m.right().HasValue() && m.right().Value() > 0) {
- int64_t value = m.right().Value();
+ int32_t value = static_cast<int32_t>(m.right().Value());
if (base::bits::IsPowerOfTwo32(value)) {
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
@@ -427,7 +680,7 @@
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
@@ -436,7 +689,7 @@
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value + 1)));
@@ -453,7 +706,22 @@
void InstructionSelector::VisitInt32Div(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMips64Div, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Ddiv.
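+        // (The 2^32 scale of both operands cancels in the quotient, so the
+        // original 64-bit values can be divided directly.)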
+ Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -461,7 +729,7 @@
void InstructionSelector::VisitUint32Div(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMips64DivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -469,6 +737,21 @@
void InstructionSelector::VisitInt32Mod(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Dmod.
+ Emit(kMips64Dmod, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -485,7 +768,7 @@
void InstructionSelector::VisitInt64Div(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
- Emit(kMips64Ddiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -493,7 +776,7 @@
void InstructionSelector::VisitUint64Div(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
- Emit(kMips64DdivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -515,35 +798,151 @@
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64CvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMips64CvtDS, node);
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64CvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMips64CvtDW, node);
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64CvtDUw, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMips64CvtDUw, node);
}
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kMips64TruncWD, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ Node* value = node->InputAt(0);
+  // Match ChangeFloat64ToInt32(Float64Round##OP) to the corresponding
+  // instruction, which does rounding and conversion to integer format.
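+  // (E.g. ChangeFloat64ToInt32(Float64RoundDown(x)) becomes a single
+  // kMips64FloorWD.)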
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kFloat64RoundDown:
+ Emit(kMips64FloorWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundUp:
+ Emit(kMips64CeilWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTiesEven:
+ Emit(kMips64RoundWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTruncate:
+ Emit(kMips64TruncWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ default:
+ break;
+ }
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (CanCover(value, next)) {
+        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float32Round##OP))
+ switch (next->opcode()) {
+ case IrOpcode::kFloat32RoundDown:
+ Emit(kMips64FloorWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundUp:
+ Emit(kMips64CeilWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTiesEven:
+ Emit(kMips64RoundWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTruncate:
+ Emit(kMips64TruncWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ default:
+ Emit(kMips64TruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ } else {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kMips64TruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
+ VisitRR(this, kMips64TruncWD, node);
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ VisitRR(this, kMips64TruncUwD, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kMips64TruncUwD, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
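+  // Projection 1 of a TryTruncate node is its success flag; if that
+  // projection is used, define it as a second output for the code generator
+  // to set.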
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ Mips64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
}
@@ -563,6 +962,24 @@
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Mips64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord64Sar: {
+ Int64BinopMatcher m(value);
+ if (m.right().IsInRange(32, 63)) {
+          // After smi untagging, no truncate is needed; combine the sequence.
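+          // (The arithmetic right shift by >= 32 already leaves a properly
+          // sign-extended 32-bit value, so a single Dsar suffices.)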
+ Emit(kMips64Dsar, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32));
}
@@ -570,7 +987,75 @@
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kMips64CvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ Node* value = node->InputAt(0);
+ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
+ // instruction.
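+  // (int32 -> float64 is exact, so converting straight to float32 rounds
+  // only once and yields the same result.)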
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kMips64CvtSW, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ VisitRR(this, kMips64CvtSD, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kMips64TruncWD, node);
+ }
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kMips64CvtSL, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kMips64CvtDL, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kMips64CvtSUl, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kMips64CvtDUl, node);
+}
+
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kMips64Float64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kMips64BitcastDL, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kMips64BitcastLD, node);
+}
+
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kMips64AddS, node);
}
@@ -579,16 +1064,45 @@
}
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ VisitRRR(this, kMips64SubS, node);
+}
+
+
void InstructionSelector::VisitFloat64Sub(Node* node) {
+ Mips64OperandGenerator g(this);
+ Float64BinopMatcher m(node);
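+  // Match -0.0 - Float64RoundDown(-0.0 - x) and reduce it to
+  // Float64RoundUp(x), using the identity -floor(-x) == ceil(x).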
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kMips64Float64RoundUp, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
VisitRRR(this, kMips64SubD, node);
}
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kMips64MulS, node);
+}
+
+
void InstructionSelector::VisitFloat64Mul(Node* node) {
VisitRRR(this, kMips64MulD, node);
}
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kMips64DivS, node);
+}
+
+
void InstructionSelector::VisitFloat64Div(Node* node) {
VisitRRR(this, kMips64DivD, node);
}
@@ -602,19 +1116,108 @@
}
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+void InstructionSelector::VisitFloat32Max(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kMips64SqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float32Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+  } else {
+    // Reverse the operands, and use the same register for the result and
+    // the right operand.
+ Emit(kMips64Float32Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- VisitRR(this, kMips64Float64Floor, node);
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float64Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+  } else {
+    // Reverse the operands, and use the same register for the result and
+    // the right operand.
+ Emit(kMips64Float64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
}
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- VisitRR(this, kMips64Float64Ceil, node);
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float32Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+  } else {
+    // Reverse the operands, and use the same register for the result and
+    // the right operand.
+ Emit(kMips64Float32Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float64Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+  } else {
+    // Reverse the operands, and use the same register for the result and
+    // the right operand.
+ Emit(kMips64Float64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kMips64AbsS, node);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kMips64AbsD, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kMips64SqrtS, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRR(this, kMips64SqrtD, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kMips64Float32RoundDown, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kMips64Float64RoundDown, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kMips64Float32RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kMips64Float64RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kMips64Float32RoundTruncate, node);
}
@@ -628,95 +1231,95 @@
}
-void InstructionSelector::VisitCall(Node* node) {
- Mips64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = NULL;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
-
- int push_count = buffer.pushed_nodes.size();
- if (push_count > 0) {
- Emit(kMips64StackClaim | MiscField::encode(push_count), NULL);
- }
- int slot = buffer.pushed_nodes.size() - 1;
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
- Emit(kMips64StoreToStackSlot | MiscField::encode(slot), NULL,
- g.UseRegister(*input));
- slot--;
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- Instruction* call_instr =
- Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
- buffer.instruction_args.size(), &buffer.instruction_args.front());
-
- call_instr->MarkAsCall();
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kMips64Float32RoundTiesEven, node);
}
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kMips64Float64RoundTiesEven, node);
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ Mips64OperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = kCArgSlotCount;
+ for (PushParameter input : (*arguments)) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ int push_count = static_cast<int>(descriptor->StackParameterCount());
+ if (push_count > 0) {
+ Emit(kMips64StackClaim, g.NoOutput(),
+ g.TempImmediate(push_count << kPointerSizeLog2));
+ }
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
+ }
+ }
+ }
+}
+
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kWord64:
+ opcode = kCheckedLoadWord64;
+ break;
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
+ InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
- InstructionOperand* length_operand =
- (!g.CanBeImmediate(offset, opcode)) ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
+ InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
+ ? g.CanBeImmediate(length, opcode)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length)
+ : g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), offset_operand, length_operand,
@@ -725,45 +1328,51 @@
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kWord64:
+ opcode = kCheckedStoreWord64;
+ break;
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
+ InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
- InstructionOperand* length_operand =
- (!g.CanBeImmediate(offset, opcode)) ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
+ InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
+ ? g.CanBeImmediate(length, opcode)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length)
+ : g.UseRegister(length);
- Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr, offset_operand,
- length_operand, g.UseRegister(value), g.UseRegister(buffer));
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ offset_operand, length_operand, g.UseRegister(value),
+ g.UseRegister(buffer));
}
@@ -771,13 +1380,13 @@
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand* left, InstructionOperand* right,
+ InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
- selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -785,14 +1394,33 @@
}
-// Shared routine for multiple float compare operations.
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Mips64OperandGenerator g(selector);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
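+  // Encoding a 0.0 operand as an immediate lets the backend avoid
+  // materializing the constant in a register.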
+ VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
+}
+
+
+// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kMips64CmpD, g.UseRegister(left), g.UseRegister(right),
- cont);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
}
@@ -805,13 +1433,53 @@
Node* right = node->InputAt(1);
// Match immediates on left or right side of comparison.
- if (g.CanBeImmediate(right, opcode, cont)) {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
- cont);
- } else if (g.CanBeImmediate(left, opcode, cont)) {
+ if (g.CanBeImmediate(right, opcode)) {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ } else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
- VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
- cont);
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
} else {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
cont);
@@ -821,7 +1489,7 @@
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitWordCompare(selector, node, kMips64Cmp32, cont, false);
+ VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}
@@ -833,15 +1501,14 @@
} // namespace
-void EmitWordCompareZero(InstructionSelector* selector, InstructionCode opcode,
- Node* value, FlagsContinuation* cont) {
+void EmitWordCompareZero(InstructionSelector* selector, Node* value,
+ FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- InstructionOperand* const value_operand = g.UseRegister(value);
+ InstructionCode opcode = cont->Encode(kMips64Cmp);
+ InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
- selector->Emit(opcode, nullptr, value_operand, g.TempImmediate(0),
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -852,13 +1519,7 @@
// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
- // Initially set comparison against 0 to be 64-bit variant for branches that
- // cannot combine.
- InstructionCode opcode = kMips64Cmp;
while (selector->CanCover(user, value)) {
- if (user->opcode() == IrOpcode::kWord32Equal) {
- opcode = kMips64Cmp32;
- }
switch (value->opcode()) {
case IrOpcode::kWord32Equal: {
// Combine with comparisons against 0 by simply inverting the
@@ -868,7 +1529,6 @@
user = value;
value = m.left().node();
cont->Negate();
- opcode = kMips64Cmp32;
continue;
}
cont->OverwriteAndNegateIfEqual(kEqual);
@@ -908,27 +1568,39 @@
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
- cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
- Node* node = value->InputAt(0);
- Node* result = node->FindProjection(0);
- if (result == NULL || selector->IsDefined(result)) {
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -936,6 +1608,12 @@
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMips64Dsub, cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMips64DaddOvf, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMips64DsubOvf, cont);
default:
break;
}
@@ -943,7 +1621,6 @@
}
break;
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kMips64Tst32, cont, true);
case IrOpcode::kWord64And:
return VisitWordCompare(selector, value, kMips64Tst, cont, true);
default:
@@ -953,7 +1630,7 @@
}
// Continuation could not be combined with a compare, emit compare against 0.
- EmitWordCompareZero(selector, opcode, value, cont);
+ EmitWordCompareZero(selector, value, cont);
}
@@ -964,6 +1641,34 @@
}
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 10 + 2 * sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
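+  // (E.g. 8 consecutive cases: 26 + 9 <= 18 + 24, so the table wins;
+  // 4 cases spread over a range of 8: 26 + 9 > 10 + 12, so lookup wins.)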
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kMips64Sub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
@@ -1000,7 +1705,7 @@
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMips64Dadd, &cont);
}
@@ -1010,7 +1715,7 @@
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMips64Dsub, &cont);
}
@@ -1019,6 +1724,26 @@
}
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kMips64DaddOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMips64DaddOvf, &cont);
+}
+
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kMips64DsubOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMips64DsubOvf, &cont);
+}
+
+
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int64BinopMatcher m(node);
@@ -1048,30 +1773,94 @@
}
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ VisitRR(this, kMips64Float64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ VisitRR(this, kMips64Float64ExtractHighWord32, node);
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
- MachineOperatorBuilder::kFloat64RoundTruncate;
+ return MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
}
} // namespace compiler
diff --git a/src/compiler/mips64/linkage-mips64.cc b/src/compiler/mips64/linkage-mips64.cc
deleted file mode 100644
index 0e1a590..0000000
--- a/src/compiler/mips64/linkage-mips64.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct MipsLinkageHelperTraits {
- static Register ReturnValueReg() { return v0; }
- static Register ReturnValue2Reg() { return v1; }
- static Register JSCallFunctionReg() { return a1; }
- static Register ContextReg() { return cp; }
- static Register RuntimeCallFunctionReg() { return a1; }
- static Register RuntimeCallArgCountReg() { return a0; }
- static RegList CCalleeSaveRegisters() {
- return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
- s6.bit() | s7.bit();
- }
- static Register CRegisterParameter(int i) {
- static Register register_parameters[] = {a0, a1, a2, a3, a4, a5, a6, a7};
- return register_parameters[i];
- }
- static int CRegisterParametersLength() { return 8; }
-};
-
-
-typedef LinkageHelper<MipsLinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/src/compiler/move-optimizer.cc b/src/compiler/move-optimizer.cc
index 330f32f..bde3f7f 100644
--- a/src/compiler/move-optimizer.cc
+++ b/src/compiler/move-optimizer.cc
@@ -8,196 +8,342 @@
namespace internal {
namespace compiler {
+namespace {
+
+typedef std::pair<InstructionOperand, InstructionOperand> MoveKey;
+
+struct MoveKeyCompare {
+ bool operator()(const MoveKey& a, const MoveKey& b) const {
+ if (a.first.EqualsCanonicalized(b.first)) {
+ return a.second.CompareCanonicalized(b.second);
+ }
+ return a.first.CompareCanonicalized(b.first);
+ }
+};
+
+struct OperandCompare {
+ bool operator()(const InstructionOperand& a,
+ const InstructionOperand& b) const {
+ return a.CompareCanonicalized(b);
+ }
+};
+
+typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
+typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
+
+
+bool GapsCanMoveOver(Instruction* instr, Zone* zone) {
+ if (instr->IsNop()) return true;
+ if (instr->ClobbersTemps() || instr->ClobbersRegisters() ||
+ instr->ClobbersDoubleRegisters()) {
+ return false;
+ }
+ if (instr->arch_opcode() != ArchOpcode::kArchNop) return false;
+
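+  // Collect every operand the instruction mentions, and refuse to move gaps
+  // over it if any of its own moves touches one of them.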
+ ZoneSet<InstructionOperand, OperandCompare> operands(zone);
+ for (size_t i = 0; i < instr->InputCount(); ++i) {
+ operands.insert(*instr->InputAt(i));
+ }
+ for (size_t i = 0; i < instr->OutputCount(); ++i) {
+ operands.insert(*instr->OutputAt(i));
+ }
+ for (size_t i = 0; i < instr->TempCount(); ++i) {
+ operands.insert(*instr->TempAt(i));
+ }
+ for (int i = Instruction::GapPosition::FIRST_GAP_POSITION;
+ i <= Instruction::GapPosition::LAST_GAP_POSITION; ++i) {
+ ParallelMove* moves = instr->parallel_moves()[i];
+ if (moves == nullptr) continue;
+ for (MoveOperands* move : *moves) {
+ if (operands.count(move->source()) > 0 ||
+ operands.count(move->destination()) > 0) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
+int FindFirstNonEmptySlot(const Instruction* instr) {
+ int i = Instruction::FIRST_GAP_POSITION;
+ for (; i <= Instruction::LAST_GAP_POSITION; i++) {
+ ParallelMove* moves = instr->parallel_moves()[i];
+ if (moves == nullptr) continue;
+ for (MoveOperands* move : *moves) {
+ if (!move->IsRedundant()) return i;
+ move->Eliminate();
+ }
+    moves->clear();  // All moves in this slot were redundant; clear it.
+ }
+ return i;
+}
+
+} // namespace
+
+
MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
: local_zone_(local_zone),
code_(code),
- temp_vector_0_(local_zone),
- temp_vector_1_(local_zone) {}
+ to_finalize_(local_zone),
+ local_vector_(local_zone) {}
void MoveOptimizer::Run() {
- // First smash all consecutive moves into the left most move slot.
- for (auto* block : code()->instruction_blocks()) {
- GapInstruction* prev_gap = nullptr;
- for (int index = block->code_start(); index < block->code_end(); ++index) {
- auto instr = code()->instructions()[index];
- if (!instr->IsGapMoves()) {
- if (instr->IsSourcePosition() || instr->IsNop()) continue;
- FinalizeMoves(&temp_vector_0_, &temp_vector_1_, prev_gap);
- prev_gap = nullptr;
- continue;
- }
- auto gap = GapInstruction::cast(instr);
- // Find first non-empty slot.
- int i = GapInstruction::FIRST_INNER_POSITION;
- for (; i <= GapInstruction::LAST_INNER_POSITION; i++) {
- auto move = gap->parallel_moves()[i];
- if (move == nullptr) continue;
- auto move_ops = move->move_operands();
- auto op = move_ops->begin();
- for (; op != move_ops->end(); ++op) {
- if (!op->IsRedundant()) break;
- }
- if (op == move_ops->end()) {
- move_ops->Rewind(0); // Clear this redundant move.
- } else {
- break; // Found index of first non-redundant move.
+ for (InstructionBlock* block : code()->instruction_blocks()) {
+ CompressBlock(block);
+ }
+ for (InstructionBlock* block : code()->instruction_blocks()) {
+ if (block->PredecessorCount() <= 1) continue;
+ if (!block->IsDeferred()) {
+ bool has_only_deferred = true;
+ for (RpoNumber& pred_id : block->predecessors()) {
+ if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) {
+ has_only_deferred = false;
+ break;
}
}
- // Nothing to do here.
- if (i == GapInstruction::LAST_INNER_POSITION + 1) {
- if (prev_gap != nullptr) {
- // Slide prev_gap down so we always know where to look for it.
- std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
- prev_gap = gap;
- }
- continue;
- }
- // Move the first non-empty gap to position 0.
- std::swap(gap->parallel_moves()[0], gap->parallel_moves()[i]);
- auto left = gap->parallel_moves()[0];
- // Compress everything into position 0.
- for (++i; i <= GapInstruction::LAST_INNER_POSITION; ++i) {
- auto move = gap->parallel_moves()[i];
- if (move == nullptr) continue;
- CompressMoves(&temp_vector_0_, left, move);
- }
- if (prev_gap != nullptr) {
- // Smash left into prev_gap, killing left.
- auto pred_moves = prev_gap->parallel_moves()[0];
- CompressMoves(&temp_vector_0_, pred_moves, left);
- std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
- }
- prev_gap = gap;
+      // OptimizeMerge would pull common moves down into this block. If those
+      // moves occur only in deferred predecessors while this block is not
+      // deferred, pulling them down would lose the optimization of keeping
+      // spills and fills confined to deferred blocks.
+ if (has_only_deferred) continue;
}
- FinalizeMoves(&temp_vector_0_, &temp_vector_1_, prev_gap);
+ OptimizeMerge(block);
+ }
+ for (Instruction* gap : to_finalize_) {
+ FinalizeMoves(gap);
}
}
-static MoveOperands* PrepareInsertAfter(ParallelMove* left, MoveOperands* move,
- Zone* zone) {
- auto move_ops = left->move_operands();
- MoveOperands* replacement = nullptr;
- MoveOperands* to_eliminate = nullptr;
- for (auto curr = move_ops->begin(); curr != move_ops->end(); ++curr) {
- if (curr->IsEliminated()) continue;
- if (curr->destination()->Equals(move->source())) {
- DCHECK_EQ(nullptr, replacement);
- replacement = curr;
- if (to_eliminate != nullptr) break;
- } else if (curr->destination()->Equals(move->destination())) {
- DCHECK_EQ(nullptr, to_eliminate);
- to_eliminate = curr;
- if (replacement != nullptr) break;
- }
- }
- DCHECK(!(replacement == to_eliminate && replacement != nullptr));
- if (replacement != nullptr) {
- auto new_source = new (zone) InstructionOperand(
- replacement->source()->kind(), replacement->source()->index());
- move->set_source(new_source);
- }
- return to_eliminate;
-}
+void MoveOptimizer::CompressMoves(ParallelMove* left, ParallelMove* right) {
+ if (right == nullptr) return;
+ MoveOpVector& eliminated = local_vector();
+ DCHECK(eliminated.empty());
-void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
- ParallelMove* right) {
- DCHECK(eliminated->empty());
- auto move_ops = right->move_operands();
- // Modify the right moves in place and collect moves that will be killed by
- // merging the two gaps.
- for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
- if (op->IsRedundant()) continue;
- MoveOperands* to_eliminate = PrepareInsertAfter(left, op, code_zone());
- if (to_eliminate != nullptr) {
- eliminated->push_back(to_eliminate);
+ if (!left->empty()) {
+ // Modify the right moves in place and collect moves that will be killed by
+ // merging the two gaps.
+ for (MoveOperands* move : *right) {
+ if (move->IsRedundant()) continue;
+ MoveOperands* to_eliminate = left->PrepareInsertAfter(move);
+ if (to_eliminate != nullptr) eliminated.push_back(to_eliminate);
}
+ // Eliminate dead moves.
+ for (MoveOperands* to_eliminate : eliminated) {
+ to_eliminate->Eliminate();
+ }
+ eliminated.clear();
}
- // Eliminate dead moves. Must happen before insertion of new moves as the
- // contents of eliminated are pointers into a list.
- for (auto to_eliminate : *eliminated) {
- to_eliminate->Eliminate();
- }
- eliminated->clear();
// Add all possibly modified moves from right side.
- for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
- if (op->IsRedundant()) continue;
- left->move_operands()->Add(*op, code_zone());
+ for (MoveOperands* move : *right) {
+ if (move->IsRedundant()) continue;
+ left->push_back(move);
}
// Nuke right.
- move_ops->Rewind(0);
+ right->clear();
+ DCHECK(eliminated.empty());
}
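+// Example (illustrative): with left = [B <- A] and right = [C <- B],
+// PrepareInsertAfter rewrites the right move's source through left, so the
+// merged result is left = [B <- A, C <- A]. A right move that overwrites a
+// destination already written by left (e.g. right = [B <- C]) eliminates
+// the stale left move before the surviving right moves are appended.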
-void MoveOptimizer::FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
- GapInstruction* gap) {
- DCHECK(loads->empty());
- DCHECK(new_moves->empty());
- if (gap == nullptr) return;
- // Split multiple loads of the same constant or stack slot off into the second
- // slot and keep remaining moves in the first slot.
- auto move_ops = gap->parallel_moves()[0]->move_operands();
- for (auto move = move_ops->begin(); move != move_ops->end(); ++move) {
- if (move->IsRedundant()) {
- move->Eliminate();
- continue;
+// Smash all consecutive moves into the leftmost move slot and accumulate them
+// as much as possible across instructions.
+void MoveOptimizer::CompressBlock(InstructionBlock* block) {
+ Instruction* prev_instr = nullptr;
+ for (int index = block->code_start(); index < block->code_end(); ++index) {
+ Instruction* instr = code()->instructions()[index];
+ int i = FindFirstNonEmptySlot(instr);
+ bool has_moves = i <= Instruction::LAST_GAP_POSITION;
+
+ if (i == Instruction::LAST_GAP_POSITION) {
+ std::swap(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+ instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
+ } else if (i == Instruction::FIRST_GAP_POSITION) {
+ CompressMoves(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+ instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
}
- if (!(move->source()->IsConstant() || move->source()->IsStackSlot() ||
- move->source()->IsDoubleStackSlot()))
- continue;
- // Search for existing move to this slot.
- MoveOperands* found = nullptr;
- for (auto load : *loads) {
- if (load->source()->Equals(move->source())) {
- found = load;
- break;
+ // We either have no moves, or, after swapping or compressing, we have
+ // all the moves in the first gap position, and none in the second/end gap
+ // position.
+ ParallelMove* first =
+ instr->parallel_moves()[Instruction::FIRST_GAP_POSITION];
+ ParallelMove* last =
+ instr->parallel_moves()[Instruction::LAST_GAP_POSITION];
+ USE(last);
+
+ DCHECK(!has_moves ||
+ (first != nullptr && (last == nullptr || last->empty())));
+
+ if (prev_instr != nullptr) {
+ if (has_moves) {
+        // Smash first into prev_instr's moves; CompressMoves empties first.
+ ParallelMove* pred_moves = prev_instr->parallel_moves()[0];
+ CompressMoves(pred_moves, first);
+ }
+ // Slide prev_instr down so we always know where to look for it.
+ std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]);
+ }
+
+ prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr;
+ if (GapsCanMoveOver(instr, local_zone())) continue;
+ if (prev_instr != nullptr) {
+ to_finalize_.push_back(prev_instr);
+ prev_instr = nullptr;
+ }
+ }
+ if (prev_instr != nullptr) {
+ to_finalize_.push_back(prev_instr);
+ }
+}
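+// Example (illustrative): while GapsCanMoveOver holds, the accumulated
+// ParallelMove keeps sliding into the first gap position of the next
+// instruction, so a run of such instructions ends up with one combined
+// move set that is finalized at the last instruction of the run.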
+
+
+const Instruction* MoveOptimizer::LastInstruction(
+ const InstructionBlock* block) const {
+ return code()->instructions()[block->last_instruction_index()];
+}
+
+
+void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
+ DCHECK(block->PredecessorCount() > 1);
+  // Ensure that the last instruction of each incoming block contains nothing
+  // that would prevent moving gap moves across it.
+ for (RpoNumber& pred_index : block->predecessors()) {
+ const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
+ const Instruction* last_instr =
+ code()->instructions()[pred->last_instruction_index()];
+ if (last_instr->IsCall()) return;
+ if (last_instr->TempCount() != 0) return;
+ if (last_instr->OutputCount() != 0) return;
+ for (size_t i = 0; i < last_instr->InputCount(); ++i) {
+ const InstructionOperand* op = last_instr->InputAt(i);
+ if (!op->IsConstant() && !op->IsImmediate()) return;
+ }
+ }
+ // TODO(dcarney): pass a ZonePool down for this?
+ MoveMap move_map(local_zone());
+ size_t correct_counts = 0;
+ // Accumulate set of shared moves.
+ for (RpoNumber& pred_index : block->predecessors()) {
+ const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
+ const Instruction* instr = LastInstruction(pred);
+ if (instr->parallel_moves()[0] == nullptr ||
+ instr->parallel_moves()[0]->empty()) {
+ return;
+ }
+ for (const MoveOperands* move : *instr->parallel_moves()[0]) {
+ if (move->IsRedundant()) continue;
+ InstructionOperand src = move->source();
+ InstructionOperand dst = move->destination();
+ MoveKey key = {src, dst};
+ auto res = move_map.insert(std::make_pair(key, 1));
+ if (!res.second) {
+ res.first->second++;
+ if (res.first->second == block->PredecessorCount()) {
+ correct_counts++;
+ }
}
}
- // Not found so insert.
- if (found == nullptr) {
- loads->push_back(move);
- // Replace source with copy for later use.
- auto dest = move->destination();
- move->set_destination(new (code_zone())
- InstructionOperand(dest->kind(), dest->index()));
+ }
+ if (move_map.empty() || correct_counts != move_map.size()) return;
+ // Find insertion point.
+ Instruction* instr = nullptr;
+ for (int i = block->first_instruction_index();
+ i <= block->last_instruction_index(); ++i) {
+ instr = code()->instructions()[i];
+ if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
+ break;
+ }
+ DCHECK_NOT_NULL(instr);
+ bool gap_initialized = true;
+ if (instr->parallel_moves()[0] == nullptr ||
+ instr->parallel_moves()[0]->empty()) {
+ to_finalize_.push_back(instr);
+ } else {
+ // Will compress after insertion.
+ gap_initialized = false;
+ std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
+ }
+ ParallelMove* moves = instr->GetOrCreateParallelMove(
+ static_cast<Instruction::GapPosition>(0), code_zone());
+ // Delete relevant entries in predecessors and move everything to block.
+ bool first_iteration = true;
+ for (RpoNumber& pred_index : block->predecessors()) {
+ const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
+ for (MoveOperands* move : *LastInstruction(pred)->parallel_moves()[0]) {
+ if (move->IsRedundant()) continue;
+ MoveKey key = {move->source(), move->destination()};
+ auto it = move_map.find(key);
+ USE(it);
+ DCHECK(it != move_map.end());
+ if (first_iteration) {
+ moves->AddMove(move->source(), move->destination());
+ }
+ move->Eliminate();
+ }
+ first_iteration = false;
+ }
+ // Compress.
+ if (!gap_initialized) {
+ CompressMoves(instr->parallel_moves()[0], instr->parallel_moves()[1]);
+ }
+}
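+// Example (illustrative): if every predecessor of a merge block ends with
+// the same gap move [D <- S], OptimizeMerge deletes the move from each
+// predecessor's last instruction and inserts it once at the first suitable
+// instruction of the merge block.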
+
+
+namespace {
+
+bool IsSlot(const InstructionOperand& op) {
+ return op.IsStackSlot() || op.IsDoubleStackSlot();
+}
+
+
+bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
+ if (!a->source().EqualsCanonicalized(b->source())) {
+ return a->source().CompareCanonicalized(b->source());
+ }
+ if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
+ if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
+ return a->destination().CompareCanonicalized(b->destination());
+}
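+
+// Example (illustrative): among loads of the same source, a register
+// destination orders before any stack-slot destination, so the register
+// becomes the group leader that FinalizeMoves (below) copies from.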
+
+} // namespace
+
+
+// Split multiple loads of the same constant or stack slot off into the second
+// slot and keep remaining moves in the first slot.
+void MoveOptimizer::FinalizeMoves(Instruction* instr) {
+ MoveOpVector& loads = local_vector();
+ DCHECK(loads.empty());
+
+ // Find all the loads.
+ for (MoveOperands* move : *instr->parallel_moves()[0]) {
+ if (move->IsRedundant()) continue;
+ if (move->source().IsConstant() || IsSlot(move->source())) {
+ loads.push_back(move);
+ }
+ }
+ if (loads.empty()) return;
+ // Group the loads by source, moving the preferred destination to the
+ // beginning of the group.
+ std::sort(loads.begin(), loads.end(), LoadCompare);
+ MoveOperands* group_begin = nullptr;
+ for (MoveOperands* load : loads) {
+ // New group.
+ if (group_begin == nullptr ||
+ !load->source().EqualsCanonicalized(group_begin->source())) {
+ group_begin = load;
continue;
}
- if ((found->destination()->IsStackSlot() ||
- found->destination()->IsDoubleStackSlot()) &&
- !(move->destination()->IsStackSlot() ||
- move->destination()->IsDoubleStackSlot())) {
- // Found a better source for this load. Smash it in place to affect other
- // loads that have already been split.
- InstructionOperand::Kind found_kind = found->destination()->kind();
- int found_index = found->destination()->index();
- auto next_dest =
- new (code_zone()) InstructionOperand(found_kind, found_index);
- auto dest = move->destination();
- found->destination()->ConvertTo(dest->kind(), dest->index());
- move->set_destination(next_dest);
- }
- // move from load destination.
- move->set_source(found->destination());
- new_moves->push_back(move);
+ // Nothing to be gained from splitting here.
+ if (IsSlot(group_begin->destination())) continue;
+ // Insert new move into slot 1.
+ ParallelMove* slot_1 = instr->GetOrCreateParallelMove(
+ static_cast<Instruction::GapPosition>(1), code_zone());
+ slot_1->AddMove(group_begin->destination(), load->destination());
+ load->Eliminate();
}
- loads->clear();
- if (new_moves->empty()) return;
- // Insert all new moves into slot 1.
- auto slot_1 = gap->GetOrCreateParallelMove(
- static_cast<GapInstruction::InnerPosition>(1), code_zone());
- DCHECK(slot_1->move_operands()->is_empty());
- slot_1->move_operands()->AddBlock(MoveOperands(nullptr, nullptr),
- static_cast<int>(new_moves->size()),
- code_zone());
- auto it = slot_1->move_operands()->begin();
- for (auto new_move : *new_moves) {
- std::swap(*new_move, *it);
- ++it;
- }
- DCHECK_EQ(it, slot_1->move_operands()->end());
- new_moves->clear();
+ loads.clear();
}
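+
+// Example (illustrative): gap moves [r1 <- k, r2 <- k, slot <- k] become
+// slot 0 = [r1 <- k] and slot 1 = [r2 <- r1, slot <- r1], so the constant
+// k is materialized only once and then copied from the register.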
} // namespace compiler
diff --git a/src/compiler/move-optimizer.h b/src/compiler/move-optimizer.h
index bbce686..c9a3289 100644
--- a/src/compiler/move-optimizer.h
+++ b/src/compiler/move-optimizer.h
@@ -12,27 +12,30 @@
namespace internal {
namespace compiler {
-class MoveOptimizer FINAL {
+class MoveOptimizer final {
public:
MoveOptimizer(Zone* local_zone, InstructionSequence* code);
void Run();
private:
typedef ZoneVector<MoveOperands*> MoveOpVector;
+ typedef ZoneVector<Instruction*> Instructions;
InstructionSequence* code() const { return code_; }
Zone* local_zone() const { return local_zone_; }
Zone* code_zone() const { return code()->zone(); }
+ MoveOpVector& local_vector() { return local_vector_; }
- void CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
- ParallelMove* right);
- void FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
- GapInstruction* gap);
+  void CompressBlock(InstructionBlock* block);
+ void CompressMoves(ParallelMove* left, ParallelMove* right);
+ const Instruction* LastInstruction(const InstructionBlock* block) const;
+ void OptimizeMerge(InstructionBlock* block);
+ void FinalizeMoves(Instruction* instr);
Zone* const local_zone_;
InstructionSequence* const code_;
- MoveOpVector temp_vector_0_;
- MoveOpVector temp_vector_1_;
+ Instructions to_finalize_;
+ MoveOpVector local_vector_;
DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
};
diff --git a/src/compiler/node-aux-data-inl.h b/src/compiler/node-aux-data-inl.h
deleted file mode 100644
index d8db4b9..0000000
--- a/src/compiler/node-aux-data-inl.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_NODE_AUX_DATA_INL_H_
-#define V8_COMPILER_NODE_AUX_DATA_INL_H_
-
-#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
-#include "src/compiler/node-aux-data.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <class T>
-NodeAuxData<T>::NodeAuxData(Zone* zone)
- : aux_data_(zone) {}
-
-
-template <class T>
-void NodeAuxData<T>::Set(Node* node, const T& data) {
- int id = node->id();
- if (id >= static_cast<int>(aux_data_.size())) {
- aux_data_.resize(id + 1);
- }
- aux_data_[id] = data;
-}
-
-
-template <class T>
-T NodeAuxData<T>::Get(Node* node) const {
- int id = node->id();
- if (id >= static_cast<int>(aux_data_.size())) {
- return T();
- }
- return aux_data_[id];
-}
-}
-}
-} // namespace v8::internal::compiler
-
-#endif
diff --git a/src/compiler/node-aux-data.h b/src/compiler/node-aux-data.h
index a08dc58..7a88292 100644
--- a/src/compiler/node-aux-data.h
+++ b/src/compiler/node-aux-data.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_NODE_AUX_DATA_H_
#define V8_COMPILER_NODE_AUX_DATA_H_
+#include "src/compiler/node.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -12,22 +13,81 @@
namespace compiler {
// Forward declarations.
-class Graph;
class Node;
template <class T>
class NodeAuxData {
public:
- inline explicit NodeAuxData(Zone* zone);
+ explicit NodeAuxData(Zone* zone) : aux_data_(zone) {}
- inline void Set(Node* node, const T& data);
- inline T Get(Node* node) const;
+ void Set(Node* node, T const& data) {
+ size_t const id = node->id();
+ if (id >= aux_data_.size()) aux_data_.resize(id + 1);
+ aux_data_[id] = data;
+ }
+
+ T Get(Node* node) const {
+ size_t const id = node->id();
+ return (id < aux_data_.size()) ? aux_data_[id] : T();
+ }
+
+ class const_iterator;
+ friend class const_iterator;
+
+ const_iterator begin() const;
+ const_iterator end() const;
private:
ZoneVector<T> aux_data_;
};
-}
-}
-} // namespace v8::internal::compiler
-#endif
+
+template <class T>
+class NodeAuxData<T>::const_iterator {
+ public:
+ typedef std::forward_iterator_tag iterator_category;
+ typedef int difference_type;
+ typedef std::pair<size_t, T> value_type;
+ typedef value_type* pointer;
+ typedef value_type& reference;
+
+ const_iterator(const ZoneVector<T>* data, size_t current)
+ : data_(data), current_(current) {}
+ const_iterator(const const_iterator& other)
+ : data_(other.data_), current_(other.current_) {}
+
+ value_type operator*() const {
+ return std::make_pair(current_, (*data_)[current_]);
+ }
+ bool operator==(const const_iterator& other) const {
+ return current_ == other.current_ && data_ == other.data_;
+ }
+ bool operator!=(const const_iterator& other) const {
+ return !(*this == other);
+ }
+ const_iterator& operator++() {
+ ++current_;
+ return *this;
+ }
+ const_iterator operator++(int);
+
+ private:
+ const ZoneVector<T>* data_;
+ size_t current_;
+};
+
+template <class T>
+typename NodeAuxData<T>::const_iterator NodeAuxData<T>::begin() const {
+ return typename NodeAuxData<T>::const_iterator(&aux_data_, 0);
+}
+
+template <class T>
+typename NodeAuxData<T>::const_iterator NodeAuxData<T>::end() const {
+ return typename NodeAuxData<T>::const_iterator(&aux_data_, aux_data_.size());
+}
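+
+// Example usage (illustrative; names are placeholders):
+//   NodeAuxData<int> counts(zone);
+//   counts.Set(node, 1);          // grows the backing store on demand
+//   int n = counts.Get(node);     // un-Set() nodes yield a default T()
+//   for (auto entry : counts) {}  // entry is a (node id, value) pair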
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_NODE_AUX_DATA_H_
diff --git a/src/compiler/node-cache.cc b/src/compiler/node-cache.cc
index 92a3fa0..79c342b 100644
--- a/src/compiler/node-cache.cc
+++ b/src/compiler/node-cache.cc
@@ -36,7 +36,7 @@
size_t old_size = size_ + kLinearProbe;
size_ *= 4;
size_t num_entries = size_ + kLinearProbe;
- entries_ = zone->NewArray<Entry>(static_cast<int>(num_entries));
+ entries_ = zone->NewArray<Entry>(num_entries);
memset(entries_, 0, sizeof(Entry) * num_entries);
// Insert the old entries into the new block.
@@ -66,7 +66,7 @@
if (!entries_) {
// Allocate the initial entries and insert the first entry.
size_t num_entries = kInitialSize + kLinearProbe;
- entries_ = zone->NewArray<Entry>(static_cast<int>(num_entries));
+ entries_ = zone->NewArray<Entry>(num_entries);
size_ = kInitialSize;
memset(entries_, 0, sizeof(Entry) * num_entries);
Entry* entry = &entries_[hash & (kInitialSize - 1)];
diff --git a/src/compiler/node-cache.h b/src/compiler/node-cache.h
index b123922..a8f9071 100644
--- a/src/compiler/node-cache.h
+++ b/src/compiler/node-cache.h
@@ -27,7 +27,7 @@
// nodes such as constants, parameters, etc.
template <typename Key, typename Hash = base::hash<Key>,
typename Pred = std::equal_to<Key> >
-class NodeCache FINAL {
+class NodeCache final {
public:
explicit NodeCache(unsigned max = 256)
: entries_(nullptr), size_(0), max_(max) {}
@@ -35,7 +35,8 @@
// Search for node associated with {key} and return a pointer to a memory
// location in this cache that stores an entry for the key. If the location
- // returned by this method contains a non-NULL node, the caller can use that
+ // returned by this method contains a non-nullptr node, the caller can use
+ // that
// node. Otherwise it is the responsibility of the caller to fill the entry
// with a new node.
// Note that a previous cache entry may be overwritten if the cache becomes
diff --git a/src/compiler/node-marker.cc b/src/compiler/node-marker.cc
new file mode 100644
index 0000000..fdfb22b
--- /dev/null
+++ b/src/compiler/node-marker.cc
@@ -0,0 +1,21 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node-marker.h"
+
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+NodeMarkerBase::NodeMarkerBase(Graph* graph, uint32_t num_states)
+ : mark_min_(graph->mark_max_), mark_max_(graph->mark_max_ += num_states) {
+ DCHECK_NE(0u, num_states); // user error!
+ DCHECK_LT(mark_min_, mark_max_); // check for wraparound.
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/node-marker.h b/src/compiler/node-marker.h
new file mode 100644
index 0000000..5ef2063
--- /dev/null
+++ b/src/compiler/node-marker.h
@@ -0,0 +1,67 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_MARKER_H_
+#define V8_COMPILER_NODE_MARKER_H_
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+
+
+// Base class for templatized NodeMarkers.
+class NodeMarkerBase {
+ public:
+ NodeMarkerBase(Graph* graph, uint32_t num_states);
+
+ V8_INLINE Mark Get(Node* node) {
+ Mark mark = node->mark();
+ if (mark < mark_min_) {
+ mark = mark_min_;
+ node->set_mark(mark_min_);
+ }
+ DCHECK_LT(mark, mark_max_);
+ return mark - mark_min_;
+ }
+ V8_INLINE void Set(Node* node, Mark mark) {
+ DCHECK_LT(mark, mark_max_ - mark_min_);
+ DCHECK_LT(node->mark(), mark_max_);
+ node->set_mark(mark + mark_min_);
+ }
+
+ private:
+ Mark const mark_min_;
+ Mark const mark_max_;
+
+ DISALLOW_COPY_AND_ASSIGN(NodeMarkerBase);
+};
+
+
+// A NodeMarker uses monotonically increasing marks to assign local "states"
+// to nodes. Only one NodeMarker per graph is valid at a given time.
+template <typename State>
+class NodeMarker : public NodeMarkerBase {
+ public:
+ V8_INLINE NodeMarker(Graph* graph, uint32_t num_states)
+ : NodeMarkerBase(graph, num_states) {}
+
+ V8_INLINE State Get(Node* node) {
+ return static_cast<State>(NodeMarkerBase::Get(node));
+ }
+
+ V8_INLINE void Set(Node* node, State state) {
+ NodeMarkerBase::Set(node, static_cast<Mark>(state));
+ }
+};
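+
+// Example usage (illustrative; names are placeholders):
+//
+//   enum class Visit : uint32_t { kUnvisited, kOnStack, kDone };
+//   NodeMarker<Visit> marker(graph, 3);  // 3 = number of states
+//   if (marker.Get(node) == Visit::kUnvisited) {  // unseen nodes read as 0
+//     marker.Set(node, Visit::kOnStack);
+//   }
+//
+// Allocating a newer NodeMarker for the same graph invalidates this one.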
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_NODE_MARKER_H_
diff --git a/src/compiler/node-matchers.cc b/src/compiler/node-matchers.cc
new file mode 100644
index 0000000..1627b88
--- /dev/null
+++ b/src/compiler/node-matchers.cc
@@ -0,0 +1,60 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool NodeMatcher::IsComparison() const {
+ return IrOpcode::IsComparisonOpcode(opcode());
+}
+
+
+BranchMatcher::BranchMatcher(Node* branch)
+ : NodeMatcher(branch), if_true_(nullptr), if_false_(nullptr) {
+ if (branch->opcode() != IrOpcode::kBranch) return;
+ for (Node* use : branch->uses()) {
+ if (use->opcode() == IrOpcode::kIfTrue) {
+ DCHECK_NULL(if_true_);
+ if_true_ = use;
+ } else if (use->opcode() == IrOpcode::kIfFalse) {
+ DCHECK_NULL(if_false_);
+ if_false_ = use;
+ }
+ }
+}
+
+
+DiamondMatcher::DiamondMatcher(Node* merge)
+ : NodeMatcher(merge),
+ branch_(nullptr),
+ if_true_(nullptr),
+ if_false_(nullptr) {
+ if (merge->InputCount() != 2) return;
+ if (merge->opcode() != IrOpcode::kMerge) return;
+ Node* input0 = merge->InputAt(0);
+ if (input0->InputCount() != 1) return;
+ Node* input1 = merge->InputAt(1);
+ if (input1->InputCount() != 1) return;
+ Node* branch = input0->InputAt(0);
+ if (branch != input1->InputAt(0)) return;
+ if (branch->opcode() != IrOpcode::kBranch) return;
+ if (input0->opcode() == IrOpcode::kIfTrue &&
+ input1->opcode() == IrOpcode::kIfFalse) {
+ branch_ = branch;
+ if_true_ = input0;
+ if_false_ = input1;
+ } else if (input0->opcode() == IrOpcode::kIfFalse &&
+ input1->opcode() == IrOpcode::kIfTrue) {
+ branch_ = branch;
+ if_true_ = input1;
+ if_false_ = input0;
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index fc11a0a..37d0e1a 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -7,9 +7,10 @@
#include <cmath>
+// TODO(turbofan): Move ExternalReference out of assembler.h
+#include "src/assembler.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
-#include "src/unique.h"
namespace v8 {
namespace internal {
@@ -28,6 +29,10 @@
}
Node* InputAt(int index) const { return node()->InputAt(index); }
+ bool Equals(const Node* node) const { return node_ == node; }
+
+ bool IsComparison() const;
+
#define DEFINE_IS_OPCODE(Opcode) \
bool Is##Opcode() const { return opcode() == IrOpcode::k##Opcode; }
ALL_OP_LIST(DEFINE_IS_OPCODE)
@@ -56,14 +61,6 @@
return value_;
}
- bool Is(const T& value) const {
- return this->HasValue() && this->Value() == value;
- }
-
- bool IsInRange(const T& low, const T& high) const {
- return this->HasValue() && low <= this->Value() && this->Value() <= high;
- }
-
private:
T value_;
bool has_value_;
@@ -71,6 +68,18 @@
template <>
+inline ValueMatcher<uint32_t, IrOpcode::kInt32Constant>::ValueMatcher(
+ Node* node)
+ : NodeMatcher(node),
+ value_(),
+ has_value_(opcode() == IrOpcode::kInt32Constant) {
+ if (has_value_) {
+ value_ = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ }
+}
+
+
+template <>
inline ValueMatcher<int64_t, IrOpcode::kInt64Constant>::ValueMatcher(Node* node)
: NodeMatcher(node), value_(), has_value_(false) {
if (opcode() == IrOpcode::kInt32Constant) {
@@ -88,10 +97,10 @@
Node* node)
: NodeMatcher(node), value_(), has_value_(false) {
if (opcode() == IrOpcode::kInt32Constant) {
- value_ = OpParameter<uint32_t>(node);
+ value_ = static_cast<uint32_t>(OpParameter<int32_t>(node));
has_value_ = true;
} else if (opcode() == IrOpcode::kInt64Constant) {
- value_ = OpParameter<uint64_t>(node);
+ value_ = static_cast<uint64_t>(OpParameter<int64_t>(node));
has_value_ = true;
}
}
@@ -99,9 +108,15 @@
// A pattern matcher for integer constants.
template <typename T, IrOpcode::Value kOpcode>
-struct IntMatcher FINAL : public ValueMatcher<T, kOpcode> {
+struct IntMatcher final : public ValueMatcher<T, kOpcode> {
explicit IntMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
+ bool Is(const T& value) const {
+ return this->HasValue() && this->Value() == value;
+ }
+ bool IsInRange(const T& low, const T& high) const {
+ return this->HasValue() && low <= this->Value() && this->Value() <= high;
+ }
bool IsMultipleOf(T n) const {
return this->HasValue() && (this->Value() % n) == 0;
}
@@ -130,13 +145,20 @@
// A pattern matcher for floating point constants.
template <typename T, IrOpcode::Value kOpcode>
-struct FloatMatcher FINAL : public ValueMatcher<T, kOpcode> {
+struct FloatMatcher final : public ValueMatcher<T, kOpcode> {
explicit FloatMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
+ bool Is(const T& value) const {
+ return this->HasValue() && this->Value() == value;
+ }
+ bool IsInRange(const T& low, const T& high) const {
+ return this->HasValue() && low <= this->Value() && this->Value() <= high;
+ }
bool IsMinusZero() const {
return this->Is(0.0) && std::signbit(this->Value());
}
bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
+ bool IsZero() const { return this->Is(0.0) && !std::signbit(this->Value()); }
};
typedef FloatMatcher<float, IrOpcode::kFloat32Constant> Float32Matcher;
@@ -145,11 +167,39 @@
// A pattern matcher for heap object constants.
-template <typename T>
-struct HeapObjectMatcher FINAL
- : public ValueMatcher<Unique<T>, IrOpcode::kHeapConstant> {
+struct HeapObjectMatcher final
+ : public ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant> {
explicit HeapObjectMatcher(Node* node)
- : ValueMatcher<Unique<T>, IrOpcode::kHeapConstant>(node) {}
+ : ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant>(node) {}
+};
+
+
+// A pattern matcher for external reference constants.
+struct ExternalReferenceMatcher final
+ : public ValueMatcher<ExternalReference, IrOpcode::kExternalConstant> {
+ explicit ExternalReferenceMatcher(Node* node)
+ : ValueMatcher<ExternalReference, IrOpcode::kExternalConstant>(node) {}
+ bool Is(const ExternalReference& value) const {
+ return this->HasValue() && this->Value() == value;
+ }
+};
+
+
+// For shorter pattern matching code, this struct matches the inputs to
+// machine-level load operations.
+template <typename Object>
+struct LoadMatcher : public NodeMatcher {
+ explicit LoadMatcher(Node* node)
+ : NodeMatcher(node), object_(InputAt(0)), index_(InputAt(1)) {}
+
+ typedef Object ObjectMatcher;
+
+ Object const& object() const { return object_; }
+ IntPtrMatcher const& index() const { return index_; }
+
+ private:
+ Object const object_;
+ IntPtrMatcher const index_;
};
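+
+// Example usage (illustrative; names are placeholders):
+//   LoadMatcher<ExternalReferenceMatcher> m(load);
+//   if (m.object().Is(ref)) { /* base is {ref}; index is m.index() */ }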
@@ -200,6 +250,7 @@
typedef BinopMatcher<Uint64Matcher, Uint64Matcher> Uint64BinopMatcher;
typedef BinopMatcher<IntPtrMatcher, IntPtrMatcher> IntPtrBinopMatcher;
typedef BinopMatcher<UintPtrMatcher, UintPtrMatcher> UintPtrBinopMatcher;
+typedef BinopMatcher<Float32Matcher, Float32Matcher> Float32BinopMatcher;
typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
@@ -333,19 +384,19 @@
struct BaseWithIndexAndDisplacementMatcher {
BaseWithIndexAndDisplacementMatcher(Node* node, bool allow_input_swap)
: matches_(false),
- index_(NULL),
+ index_(nullptr),
scale_(0),
- base_(NULL),
- displacement_(NULL) {
+ base_(nullptr),
+ displacement_(nullptr) {
Initialize(node, allow_input_swap);
}
explicit BaseWithIndexAndDisplacementMatcher(Node* node)
: matches_(false),
- index_(NULL),
+ index_(nullptr),
scale_(0),
- base_(NULL),
- displacement_(NULL) {
+ base_(nullptr),
+ displacement_(nullptr) {
Initialize(node, node->op()->HasProperty(Operator::kCommutative));
}
@@ -383,10 +434,10 @@
AddMatcher m(node, allow_input_swap);
Node* left = m.left().node();
Node* right = m.right().node();
- Node* displacement = NULL;
- Node* base = NULL;
- Node* index = NULL;
- Node* scale_expression = NULL;
+ Node* displacement = nullptr;
+ Node* base = nullptr;
+ Node* index = nullptr;
+ Node* scale_expression = nullptr;
bool power_of_two_plus_one = false;
int scale = 0;
if (m.HasIndexInput() && left->OwnedBy(node)) {
@@ -468,7 +519,7 @@
}
}
int64_t value = 0;
- if (displacement != NULL) {
+ if (displacement != nullptr) {
switch (displacement->opcode()) {
case IrOpcode::kInt32Constant: {
value = OpParameter<int32_t>(displacement);
@@ -483,11 +534,11 @@
break;
}
if (value == 0) {
- displacement = NULL;
+ displacement = nullptr;
}
}
if (power_of_two_plus_one) {
- if (base != NULL) {
+ if (base != nullptr) {
// If the scale requires explicitly using the index as the base, but a
// base is already part of the match, then the (1 << N + 1) scale factor
// can't be folded into the match and the entire index * scale
@@ -511,6 +562,54 @@
typedef BaseWithIndexAndDisplacementMatcher<Int64AddMatcher>
BaseWithIndexAndDisplacement64Matcher;
+struct BranchMatcher : public NodeMatcher {
+ explicit BranchMatcher(Node* branch);
+
+ bool Matched() const { return if_true_ && if_false_; }
+
+ Node* Branch() const { return node(); }
+ Node* IfTrue() const { return if_true_; }
+ Node* IfFalse() const { return if_false_; }
+
+ private:
+ Node* if_true_;
+ Node* if_false_;
+};
+
+
+struct DiamondMatcher : public NodeMatcher {
+ explicit DiamondMatcher(Node* merge);
+
+ bool Matched() const { return branch_; }
+ bool IfProjectionsAreOwned() const {
+ return if_true_->OwnedBy(node()) && if_false_->OwnedBy(node());
+ }
+
+ Node* Branch() const { return branch_; }
+ Node* IfTrue() const { return if_true_; }
+ Node* IfFalse() const { return if_false_; }
+ Node* Merge() const { return node(); }
+
+ Node* TrueInputOf(Node* phi) const {
+ DCHECK(IrOpcode::IsPhiOpcode(phi->opcode()));
+ DCHECK_EQ(3, phi->InputCount());
+ DCHECK_EQ(Merge(), phi->InputAt(2));
+ return phi->InputAt(if_true_ == Merge()->InputAt(0) ? 0 : 1);
+ }
+
+ Node* FalseInputOf(Node* phi) const {
+ DCHECK(IrOpcode::IsPhiOpcode(phi->opcode()));
+ DCHECK_EQ(3, phi->InputCount());
+ DCHECK_EQ(Merge(), phi->InputAt(2));
+ return phi->InputAt(if_true_ == Merge()->InputAt(0) ? 1 : 0);
+ }
+
+ private:
+ Node* branch_;
+ Node* if_true_;
+ Node* if_false_;
+};
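+
+// Example usage (illustrative; names are placeholders):
+//   DiamondMatcher m(merge);
+//   if (m.Matched() && m.IfProjectionsAreOwned()) {
+//     Node* vtrue = m.TrueInputOf(phi);    // phi input on the IfTrue path
+//     Node* vfalse = m.FalseInputOf(phi);  // phi input on the IfFalse path
+//   }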
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/node-properties-inl.h b/src/compiler/node-properties-inl.h
deleted file mode 100644
index 0d29614..0000000
--- a/src/compiler/node-properties-inl.h
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_NODE_PROPERTIES_INL_H_
-#define V8_COMPILER_NODE_PROPERTIES_INL_H_
-
-#include "src/v8.h"
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
-#include "src/compiler/operator-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// -----------------------------------------------------------------------------
-// Input layout.
-// Inputs are always arranged in order as follows:
-// 0 [ values, context, effects, control ] node->InputCount()
-
-inline int NodeProperties::FirstValueIndex(Node* node) { return 0; }
-
-inline int NodeProperties::FirstContextIndex(Node* node) {
- return PastValueIndex(node);
-}
-
-inline int NodeProperties::FirstFrameStateIndex(Node* node) {
- return PastContextIndex(node);
-}
-
-inline int NodeProperties::FirstEffectIndex(Node* node) {
- return PastFrameStateIndex(node);
-}
-
-inline int NodeProperties::FirstControlIndex(Node* node) {
- return PastEffectIndex(node);
-}
-
-
-inline int NodeProperties::PastValueIndex(Node* node) {
- return FirstValueIndex(node) + node->op()->ValueInputCount();
-}
-
-inline int NodeProperties::PastContextIndex(Node* node) {
- return FirstContextIndex(node) +
- OperatorProperties::GetContextInputCount(node->op());
-}
-
-inline int NodeProperties::PastFrameStateIndex(Node* node) {
- return FirstFrameStateIndex(node) +
- OperatorProperties::GetFrameStateInputCount(node->op());
-}
-
-inline int NodeProperties::PastEffectIndex(Node* node) {
- return FirstEffectIndex(node) + node->op()->EffectInputCount();
-}
-
-inline int NodeProperties::PastControlIndex(Node* node) {
- return FirstControlIndex(node) + node->op()->ControlInputCount();
-}
-
-
-// -----------------------------------------------------------------------------
-// Input accessors.
-
-inline Node* NodeProperties::GetValueInput(Node* node, int index) {
- DCHECK(0 <= index && index < node->op()->ValueInputCount());
- return node->InputAt(FirstValueIndex(node) + index);
-}
-
-inline Node* NodeProperties::GetContextInput(Node* node) {
- DCHECK(OperatorProperties::HasContextInput(node->op()));
- return node->InputAt(FirstContextIndex(node));
-}
-
-inline Node* NodeProperties::GetFrameStateInput(Node* node) {
- DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
- return node->InputAt(FirstFrameStateIndex(node));
-}
-
-inline Node* NodeProperties::GetEffectInput(Node* node, int index) {
- DCHECK(0 <= index && index < node->op()->EffectInputCount());
- return node->InputAt(FirstEffectIndex(node) + index);
-}
-
-inline Node* NodeProperties::GetControlInput(Node* node, int index) {
- DCHECK(0 <= index && index < node->op()->ControlInputCount());
- return node->InputAt(FirstControlIndex(node) + index);
-}
-
-inline int NodeProperties::GetFrameStateIndex(Node* node) {
- DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
- return FirstFrameStateIndex(node);
-}
-
-// -----------------------------------------------------------------------------
-// Edge kinds.
-
-inline bool NodeProperties::IsInputRange(Edge edge, int first, int num) {
- // TODO(titzer): edge.index() is linear time;
- // edges maybe need to be marked as value/effect/control.
- if (num == 0) return false;
- int index = edge.index();
- return first <= index && index < first + num;
-}
-
-inline bool NodeProperties::IsValueEdge(Edge edge) {
- Node* node = edge.from();
- return IsInputRange(edge, FirstValueIndex(node),
- node->op()->ValueInputCount());
-}
-
-inline bool NodeProperties::IsContextEdge(Edge edge) {
- Node* node = edge.from();
- return IsInputRange(edge, FirstContextIndex(node),
- OperatorProperties::GetContextInputCount(node->op()));
-}
-
-inline bool NodeProperties::IsEffectEdge(Edge edge) {
- Node* node = edge.from();
- return IsInputRange(edge, FirstEffectIndex(node),
- node->op()->EffectInputCount());
-}
-
-inline bool NodeProperties::IsControlEdge(Edge edge) {
- Node* node = edge.from();
- return IsInputRange(edge, FirstControlIndex(node),
- node->op()->ControlInputCount());
-}
-
-
-// -----------------------------------------------------------------------------
-// Miscellaneous predicates.
-
-inline bool NodeProperties::IsControl(Node* node) {
- return IrOpcode::IsControlOpcode(node->opcode());
-}
-
-
-// -----------------------------------------------------------------------------
-// Miscellaneous mutators.
-
-inline void NodeProperties::ReplaceControlInput(Node* node, Node* control) {
- node->ReplaceInput(FirstControlIndex(node), control);
-}
-
-inline void NodeProperties::ReplaceEffectInput(Node* node, Node* effect,
- int index) {
- DCHECK(index < node->op()->EffectInputCount());
- return node->ReplaceInput(FirstEffectIndex(node) + index, effect);
-}
-
-inline void NodeProperties::ReplaceFrameStateInput(Node* node,
- Node* frame_state) {
- DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
- node->ReplaceInput(FirstFrameStateIndex(node), frame_state);
-}
-
-inline void NodeProperties::RemoveNonValueInputs(Node* node) {
- node->TrimInputCount(node->op()->ValueInputCount());
-}
-
-
-// Replace value uses of {node} with {value} and effect uses of {node} with
-// {effect}. If {effect == NULL}, then use the effect input to {node}.
-inline void NodeProperties::ReplaceWithValue(Node* node, Node* value,
- Node* effect) {
- DCHECK(node->op()->ControlOutputCount() == 0);
- if (effect == NULL && node->op()->EffectInputCount() > 0) {
- effect = NodeProperties::GetEffectInput(node);
- }
-
- // Requires distinguishing between value and effect edges.
- for (Edge edge : node->use_edges()) {
- if (NodeProperties::IsEffectEdge(edge)) {
- DCHECK_NE(NULL, effect);
- edge.UpdateTo(effect);
- } else {
- edge.UpdateTo(value);
- }
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Type Bounds.
-
-inline bool NodeProperties::IsTyped(Node* node) {
- Bounds bounds = node->bounds();
- DCHECK((bounds.lower == NULL) == (bounds.upper == NULL));
- return bounds.upper != NULL;
-}
-
-inline Bounds NodeProperties::GetBounds(Node* node) {
- DCHECK(IsTyped(node));
- return node->bounds();
-}
-
-inline void NodeProperties::RemoveBounds(Node* node) {
- Bounds empty;
- node->set_bounds(empty);
-}
-
-inline void NodeProperties::SetBounds(Node* node, Bounds b) {
- DCHECK(b.lower != NULL && b.upper != NULL);
- node->set_bounds(b);
-}
-
-inline bool NodeProperties::AllValueInputsAreTyped(Node* node) {
- int input_count = node->op()->ValueInputCount();
- for (int i = 0; i < input_count; ++i) {
- if (!IsTyped(GetValueInput(node, i))) return false;
- }
- return true;
-}
-
-
-}
-}
-} // namespace v8::internal::compiler
-
-#endif // V8_COMPILER_NODE_PROPERTIES_INL_H_
diff --git a/src/compiler/node-properties.cc b/src/compiler/node-properties.cc
new file mode 100644
index 0000000..cb6c3c4
--- /dev/null
+++ b/src/compiler/node-properties.cc
@@ -0,0 +1,416 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/verifier.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// static
+int NodeProperties::PastValueIndex(Node* node) {
+ return FirstValueIndex(node) + node->op()->ValueInputCount();
+}
+
+
+// static
+int NodeProperties::PastContextIndex(Node* node) {
+ return FirstContextIndex(node) +
+ OperatorProperties::GetContextInputCount(node->op());
+}
+
+
+// static
+int NodeProperties::PastFrameStateIndex(Node* node) {
+ return FirstFrameStateIndex(node) +
+ OperatorProperties::GetFrameStateInputCount(node->op());
+}
+
+
+// static
+int NodeProperties::PastEffectIndex(Node* node) {
+ return FirstEffectIndex(node) + node->op()->EffectInputCount();
+}
+
+
+// static
+int NodeProperties::PastControlIndex(Node* node) {
+ return FirstControlIndex(node) + node->op()->ControlInputCount();
+}
+
+
+// static
+Node* NodeProperties::GetValueInput(Node* node, int index) {
+ DCHECK(0 <= index && index < node->op()->ValueInputCount());
+ return node->InputAt(FirstValueIndex(node) + index);
+}
+
+
+// static
+Node* NodeProperties::GetContextInput(Node* node) {
+ DCHECK(OperatorProperties::HasContextInput(node->op()));
+ return node->InputAt(FirstContextIndex(node));
+}
+
+
+// static
+Node* NodeProperties::GetFrameStateInput(Node* node, int index) {
+ DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
+ return node->InputAt(FirstFrameStateIndex(node) + index);
+}
+
+
+// static
+Node* NodeProperties::GetEffectInput(Node* node, int index) {
+ DCHECK(0 <= index && index < node->op()->EffectInputCount());
+ return node->InputAt(FirstEffectIndex(node) + index);
+}
+
+
+// static
+Node* NodeProperties::GetControlInput(Node* node, int index) {
+ DCHECK(0 <= index && index < node->op()->ControlInputCount());
+ return node->InputAt(FirstControlIndex(node) + index);
+}
+
+
+// static
+bool NodeProperties::IsValueEdge(Edge edge) {
+ Node* const node = edge.from();
+ return IsInputRange(edge, FirstValueIndex(node),
+ node->op()->ValueInputCount());
+}
+
+
+// static
+bool NodeProperties::IsContextEdge(Edge edge) {
+ Node* const node = edge.from();
+ return IsInputRange(edge, FirstContextIndex(node),
+ OperatorProperties::GetContextInputCount(node->op()));
+}
+
+
+// static
+bool NodeProperties::IsFrameStateEdge(Edge edge) {
+ Node* const node = edge.from();
+ return IsInputRange(edge, FirstFrameStateIndex(node),
+ OperatorProperties::GetFrameStateInputCount(node->op()));
+}
+
+
+// static
+bool NodeProperties::IsEffectEdge(Edge edge) {
+ Node* const node = edge.from();
+ return IsInputRange(edge, FirstEffectIndex(node),
+ node->op()->EffectInputCount());
+}
+
+
+// static
+bool NodeProperties::IsControlEdge(Edge edge) {
+ Node* const node = edge.from();
+ return IsInputRange(edge, FirstControlIndex(node),
+ node->op()->ControlInputCount());
+}
+
+
+// static
+bool NodeProperties::IsExceptionalCall(Node* node) {
+ for (Edge const edge : node->use_edges()) {
+ if (!NodeProperties::IsControlEdge(edge)) continue;
+ if (edge.from()->opcode() == IrOpcode::kIfException) return true;
+ }
+ return false;
+}
+
+
+// static
+void NodeProperties::ReplaceValueInput(Node* node, Node* value, int index) {
+ DCHECK(index < node->op()->ValueInputCount());
+ node->ReplaceInput(FirstValueIndex(node) + index, value);
+}
+
+
+// static
+void NodeProperties::ReplaceValueInputs(Node* node, Node* value) {
+ int value_input_count = node->op()->ValueInputCount();
+ DCHECK_LE(1, value_input_count);
+ node->ReplaceInput(0, value);
+ while (--value_input_count > 0) {
+ node->RemoveInput(value_input_count);
+ }
+}
+
+
+// static
+void NodeProperties::ReplaceContextInput(Node* node, Node* context) {
+ node->ReplaceInput(FirstContextIndex(node), context);
+}
+
+
+// static
+void NodeProperties::ReplaceControlInput(Node* node, Node* control) {
+ node->ReplaceInput(FirstControlIndex(node), control);
+}
+
+
+// static
+void NodeProperties::ReplaceEffectInput(Node* node, Node* effect, int index) {
+ DCHECK(index < node->op()->EffectInputCount());
+ return node->ReplaceInput(FirstEffectIndex(node) + index, effect);
+}
+
+
+// static
+void NodeProperties::ReplaceFrameStateInput(Node* node, int index,
+ Node* frame_state) {
+ DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
+ node->ReplaceInput(FirstFrameStateIndex(node) + index, frame_state);
+}
+
+
+// static
+void NodeProperties::RemoveFrameStateInput(Node* node, int index) {
+ DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
+ node->RemoveInput(FirstFrameStateIndex(node) + index);
+}
+
+
+// static
+void NodeProperties::RemoveNonValueInputs(Node* node) {
+ node->TrimInputCount(node->op()->ValueInputCount());
+}
+
+
+// static
+void NodeProperties::RemoveValueInputs(Node* node) {
+ int value_input_count = node->op()->ValueInputCount();
+ while (--value_input_count >= 0) {
+ node->RemoveInput(value_input_count);
+ }
+}
+
+
+void NodeProperties::MergeControlToEnd(Graph* graph,
+ CommonOperatorBuilder* common,
+ Node* node) {
+ graph->end()->AppendInput(graph->zone(), node);
+ graph->end()->set_op(common->End(graph->end()->InputCount()));
+}
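+// For example (illustrative): appending a Throw node to an End node that
+// currently has n inputs switches its operator to common->End(n + 1).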
+
+
+// static
+void NodeProperties::ReplaceUses(Node* node, Node* value, Node* effect,
+ Node* success, Node* exception) {
+ // Requires distinguishing between value, effect and control edges.
+ for (Edge edge : node->use_edges()) {
+ if (IsControlEdge(edge)) {
+ if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
+ DCHECK_NOT_NULL(success);
+ edge.UpdateTo(success);
+ } else if (edge.from()->opcode() == IrOpcode::kIfException) {
+ DCHECK_NOT_NULL(exception);
+ edge.UpdateTo(exception);
+ } else {
+ UNREACHABLE();
+ }
+ } else if (IsEffectEdge(edge)) {
+ DCHECK_NOT_NULL(effect);
+ edge.UpdateTo(effect);
+ } else {
+ DCHECK_NOT_NULL(value);
+ edge.UpdateTo(value);
+ }
+ }
+}
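+// Example (illustrative): when lowering a call whose control uses are an
+// IfSuccess/IfException pair, ReplaceUses(call, value, effect, success,
+// exception) reroutes the value edges, the effect edges, and both control
+// projections; an argument may be nullptr only if the corresponding use
+// kind does not occur.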
+
+
+// static
+void NodeProperties::ChangeOp(Node* node, const Operator* new_op) {
+ node->set_op(new_op);
+ Verifier::VerifyNode(node);
+}
+
+
+// static
+Node* NodeProperties::FindProjection(Node* node, size_t projection_index) {
+ for (auto use : node->uses()) {
+ if (use->opcode() == IrOpcode::kProjection &&
+ ProjectionIndexOf(use->op()) == projection_index) {
+ return use;
+ }
+ }
+ return nullptr;
+}
+
+
+// static
+void NodeProperties::CollectControlProjections(Node* node, Node** projections,
+ size_t projection_count) {
+#ifdef DEBUG
+ DCHECK_LE(static_cast<int>(projection_count), node->UseCount());
+ std::memset(projections, 0, sizeof(*projections) * projection_count);
+#endif
+ size_t if_value_index = 0;
+ for (Edge const edge : node->use_edges()) {
+ if (!IsControlEdge(edge)) continue;
+ Node* use = edge.from();
+ size_t index;
+ switch (use->opcode()) {
+ case IrOpcode::kIfTrue:
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ index = 0;
+ break;
+ case IrOpcode::kIfFalse:
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ index = 1;
+ break;
+ case IrOpcode::kIfSuccess:
+ DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
+ index = 0;
+ break;
+ case IrOpcode::kIfException:
+ DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
+ index = 1;
+ break;
+ case IrOpcode::kIfValue:
+ DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
+ index = if_value_index++;
+ break;
+ case IrOpcode::kIfDefault:
+ DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
+ index = projection_count - 1;
+ break;
+ default:
+ continue;
+ }
+ DCHECK_LT(if_value_index, projection_count);
+ DCHECK_LT(index, projection_count);
+ DCHECK_NULL(projections[index]);
+ projections[index] = use;
+ }
+#ifdef DEBUG
+ for (size_t index = 0; index < projection_count; ++index) {
+ DCHECK_NOT_NULL(projections[index]);
+ }
+#endif
+}
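+// Example (illustrative): for a branch,
+//   Node* projections[2];
+//   NodeProperties::CollectControlProjections(branch, projections, 2);
+// leaves the IfTrue use in projections[0] and the IfFalse use in
+// projections[1].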
+
+
+// static
+MaybeHandle<Context> NodeProperties::GetSpecializationContext(
+ Node* node, MaybeHandle<Context> context) {
+ switch (node->opcode()) {
+ case IrOpcode::kHeapConstant:
+ return Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
+ case IrOpcode::kParameter: {
+ Node* const start = NodeProperties::GetValueInput(node, 0);
+ DCHECK_EQ(IrOpcode::kStart, start->opcode());
+ int const index = ParameterIndexOf(node->op());
+ // The context is always the last parameter to a JavaScript function, and
+ // {Parameter} indices start at -1, so value outputs of {Start} look like
+ // this: closure, receiver, param0, ..., paramN, context.
+ if (index == start->op()->ValueOutputCount() - 2) {
+ return context;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return MaybeHandle<Context>();
+}
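+// Worked example (illustrative): a function with N declared parameters has
+// {Start} value outputs for the closure, the receiver, the N parameters and
+// the context, i.e. ValueOutputCount() == N + 3. The context {Parameter}
+// therefore has index N + 1 == ValueOutputCount() - 2, as checked above.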
+
+
+// static
+MaybeHandle<Context> NodeProperties::GetSpecializationNativeContext(
+ Node* node, MaybeHandle<Context> native_context) {
+ while (true) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCreateBlockContext:
+ case IrOpcode::kJSCreateCatchContext:
+ case IrOpcode::kJSCreateFunctionContext:
+ case IrOpcode::kJSCreateModuleContext:
+ case IrOpcode::kJSCreateScriptContext:
+ case IrOpcode::kJSCreateWithContext: {
+        // Skip over the intermediate contexts; we're only interested in the
+        // very last context in the context chain anyway.
+ node = NodeProperties::GetContextInput(node);
+ break;
+ }
+ case IrOpcode::kHeapConstant: {
+ // Extract the native context from the actual {context}.
+ Handle<Context> context =
+ Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
+ return handle(context->native_context());
+ }
+ case IrOpcode::kOsrValue: {
+ int const index = OpParameter<int>(node);
+ if (index == Linkage::kOsrContextSpillSlotIndex) {
+ return native_context;
+ }
+ return MaybeHandle<Context>();
+ }
+ case IrOpcode::kParameter: {
+ Node* const start = NodeProperties::GetValueInput(node, 0);
+ DCHECK_EQ(IrOpcode::kStart, start->opcode());
+ int const index = ParameterIndexOf(node->op());
+ // The context is always the last parameter to a JavaScript function,
+ // and {Parameter} indices start at -1, so value outputs of {Start}
+ // look like this: closure, receiver, param0, ..., paramN, context.
+ if (index == start->op()->ValueOutputCount() - 2) {
+ return native_context;
+ }
+ return MaybeHandle<Context>();
+ }
+ default:
+ return MaybeHandle<Context>();
+ }
+ }
+}
+
+
+// static
+MaybeHandle<JSGlobalObject> NodeProperties::GetSpecializationGlobalObject(
+ Node* node, MaybeHandle<Context> native_context) {
+ Handle<Context> context;
+ if (GetSpecializationNativeContext(node, native_context).ToHandle(&context)) {
+ return handle(context->global_object());
+ }
+ return MaybeHandle<JSGlobalObject>();
+}
+
+
+// static
+Type* NodeProperties::GetTypeOrAny(Node* node) {
+ return IsTyped(node) ? node->type() : Type::Any();
+}
+
+
+// static
+bool NodeProperties::AllValueInputsAreTyped(Node* node) {
+ int input_count = node->op()->ValueInputCount();
+ for (int index = 0; index < input_count; ++index) {
+ if (!IsTyped(GetValueInput(node, index))) return false;
+ }
+ return true;
+}
+
+
+// static
+bool NodeProperties::IsInputRange(Edge edge, int first, int num) {
+ if (num == 0) return false;
+ int const index = edge.index();
+ return first <= index && index < first + num;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
index 025be78..58005a7 100644
--- a/src/compiler/node-properties.h
+++ b/src/compiler/node-properties.h
@@ -12,51 +12,150 @@
namespace internal {
namespace compiler {
+class Graph;
class Operator;
+class CommonOperatorBuilder;
// A facade that simplifies access to the different kinds of inputs to a node.
-class NodeProperties {
+class NodeProperties final {
public:
- static inline Node* GetValueInput(Node* node, int index);
- static inline Node* GetContextInput(Node* node);
- static inline Node* GetFrameStateInput(Node* node);
- static inline Node* GetEffectInput(Node* node, int index = 0);
- static inline Node* GetControlInput(Node* node, int index = 0);
+ // ---------------------------------------------------------------------------
+ // Input layout.
+ // Inputs are always arranged in order as follows:
+ // 0 [ values, context, frame state, effects, control ] node->InputCount()
- static inline int GetFrameStateIndex(Node* node);
+ static int FirstValueIndex(Node* node) { return 0; }
+ static int FirstContextIndex(Node* node) { return PastValueIndex(node); }
+ static int FirstFrameStateIndex(Node* node) { return PastContextIndex(node); }
+ static int FirstEffectIndex(Node* node) { return PastFrameStateIndex(node); }
+ static int FirstControlIndex(Node* node) { return PastEffectIndex(node); }
+ static int PastValueIndex(Node* node);
+ static int PastContextIndex(Node* node);
+ static int PastFrameStateIndex(Node* node);
+ static int PastEffectIndex(Node* node);
+ static int PastControlIndex(Node* node);
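+  // For example (illustrative), a node with two value inputs, a context, a
+  // frame state, one effect and one control input uses input indices
+  // 0-1 (values), 2 (context), 3 (frame state), 4 (effect) and 5 (control).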
- static inline bool IsValueEdge(Edge edge);
- static inline bool IsContextEdge(Edge edge);
- static inline bool IsEffectEdge(Edge edge);
- static inline bool IsControlEdge(Edge edge);
- static inline bool IsControl(Node* node);
+ // ---------------------------------------------------------------------------
+ // Input accessors.
- static inline void ReplaceControlInput(Node* node, Node* control);
- static inline void ReplaceEffectInput(Node* node, Node* effect,
- int index = 0);
- static inline void ReplaceFrameStateInput(Node* node, Node* frame_state);
- static inline void RemoveNonValueInputs(Node* node);
- static inline void ReplaceWithValue(Node* node, Node* value,
- Node* effect = NULL);
+ static Node* GetValueInput(Node* node, int index);
+ static Node* GetContextInput(Node* node);
+ static Node* GetFrameStateInput(Node* node, int index);
+ static Node* GetEffectInput(Node* node, int index = 0);
+ static Node* GetControlInput(Node* node, int index = 0);
- static inline bool IsTyped(Node* node);
- static inline Bounds GetBounds(Node* node);
- static inline void SetBounds(Node* node, Bounds bounds);
- static inline void RemoveBounds(Node* node);
- static inline bool AllValueInputsAreTyped(Node* node);
- static inline int FirstValueIndex(Node* node);
- static inline int FirstContextIndex(Node* node);
- static inline int FirstFrameStateIndex(Node* node);
- static inline int FirstEffectIndex(Node* node);
- static inline int FirstControlIndex(Node* node);
- static inline int PastValueIndex(Node* node);
- static inline int PastContextIndex(Node* node);
- static inline int PastFrameStateIndex(Node* node);
- static inline int PastEffectIndex(Node* node);
- static inline int PastControlIndex(Node* node);
+ // ---------------------------------------------------------------------------
+ // Edge kinds.
+ static bool IsValueEdge(Edge edge);
+ static bool IsContextEdge(Edge edge);
+ static bool IsFrameStateEdge(Edge edge);
+ static bool IsEffectEdge(Edge edge);
+ static bool IsControlEdge(Edge edge);
+
+
+ // ---------------------------------------------------------------------------
+ // Miscellaneous predicates.
+
+ static bool IsCommon(Node* node) {
+ return IrOpcode::IsCommonOpcode(node->opcode());
+ }
+ static bool IsControl(Node* node) {
+ return IrOpcode::IsControlOpcode(node->opcode());
+ }
+ static bool IsConstant(Node* node) {
+ return IrOpcode::IsConstantOpcode(node->opcode());
+ }
+ static bool IsPhi(Node* node) {
+ return IrOpcode::IsPhiOpcode(node->opcode());
+ }
+
+ // Determines whether exceptions thrown by the given node are handled locally
+ // within the graph (i.e. an IfException projection is present).
+ static bool IsExceptionalCall(Node* node);
+
+ // ---------------------------------------------------------------------------
+ // Miscellaneous mutators.
+
+ static void ReplaceValueInput(Node* node, Node* value, int index);
+ static void ReplaceContextInput(Node* node, Node* context);
+ static void ReplaceControlInput(Node* node, Node* control);
+ static void ReplaceEffectInput(Node* node, Node* effect, int index = 0);
+ static void ReplaceFrameStateInput(Node* node, int index, Node* frame_state);
+ static void RemoveFrameStateInput(Node* node, int index);
+ static void RemoveNonValueInputs(Node* node);
+ static void RemoveValueInputs(Node* node);
+
+ // Replaces all value inputs of {node} with the single input {value}.
+ static void ReplaceValueInputs(Node* node, Node* value);
+
+ // Merge the control node {node} into the end of the graph, introducing a
+ // merge node or expanding an existing merge node if necessary.
+ static void MergeControlToEnd(Graph* graph, CommonOperatorBuilder* common,
+ Node* node);
+
+ // Replace all uses of {node} with the given replacement nodes. All occurring
+ // use kinds need to be replaced, {nullptr} is only valid if a use kind is
+ // guaranteed not to exist.
+ static void ReplaceUses(Node* node, Node* value, Node* effect = nullptr,
+ Node* success = nullptr, Node* exception = nullptr);
+
+ // Safe wrapper to mutate the operator of a node. Checks that the node is
+ // currently in a state that satisfies constraints of the new operator.
+ static void ChangeOp(Node* node, const Operator* new_op);
+
+ // ---------------------------------------------------------------------------
+ // Miscellaneous utilities.
+
+ static Node* FindProjection(Node* node, size_t projection_index);
+
+ // Collect the branch-related projections from a node, such as IfTrue,
+ // IfFalse, IfSuccess, IfException, IfValue and IfDefault.
+ // - Branch: [ IfTrue, IfFalse ]
+ // - Call : [ IfSuccess, IfException ]
+ // - Switch: [ IfValue, ..., IfDefault ]
+ static void CollectControlProjections(Node* node, Node** proj, size_t count);
+
+ // ---------------------------------------------------------------------------
+ // Context.
+
+ // Try to retrieve the specialization context from the given {node},
+ // optionally utilizing the knowledge about the (outermost) function
+ // {context}.
+ static MaybeHandle<Context> GetSpecializationContext(
+ Node* node, MaybeHandle<Context> context = MaybeHandle<Context>());
+
+ // Try to retrieve the specialization native context from the given
+ // {node}, optionally utilizing the knowledge about the (outermost)
+ // {native_context}.
+ static MaybeHandle<Context> GetSpecializationNativeContext(
+ Node* node, MaybeHandle<Context> native_context = MaybeHandle<Context>());
+
+ // Try to retrieve the specialization global object from the given
+ // {node}, optionally utilizing the knowledge about the (outermost)
+ // {native_context}.
+ static MaybeHandle<JSGlobalObject> GetSpecializationGlobalObject(
+ Node* node, MaybeHandle<Context> native_context = MaybeHandle<Context>());
+
+ // ---------------------------------------------------------------------------
+ // Type.
+
+ static bool IsTyped(Node* node) { return node->type() != nullptr; }
+ static Type* GetType(Node* node) {
+ DCHECK(IsTyped(node));
+ return node->type();
+ }
+ static Type* GetTypeOrAny(Node* node);
+ static void SetType(Node* node, Type* type) {
+ DCHECK_NOT_NULL(type);
+ node->set_type(type);
+ }
+ static void RemoveType(Node* node) { node->set_type(nullptr); }
+ static bool AllValueInputsAreTyped(Node* node);
+
+ private:
static inline bool IsInputRange(Edge edge, int first, int count);
};
diff --git a/src/compiler/node.cc b/src/compiler/node.cc
index 8f44c24..198c353 100644
--- a/src/compiler/node.cc
+++ b/src/compiler/node.cc
@@ -4,83 +4,235 @@
#include "src/compiler/node.h"
-#include "src/compiler/graph.h"
-#include "src/zone.h"
-
namespace v8 {
namespace internal {
namespace compiler {
-Node::Node(NodeId id, int input_count, int reserved_input_count)
- : id_(id),
- bit_field_(InputCountField::encode(input_count) |
- ReservedInputCountField::encode(reserved_input_count) |
- HasAppendableInputsField::encode(false)),
- first_use_(nullptr),
- last_use_(nullptr) {
- inputs_.static_ = reinterpret_cast<Input*>(this + 1);
+Node::OutOfLineInputs* Node::OutOfLineInputs::New(Zone* zone, int capacity) {
+ size_t size =
+ sizeof(OutOfLineInputs) + capacity * (sizeof(Node*) + sizeof(Use));
+ intptr_t raw_buffer = reinterpret_cast<intptr_t>(zone->New(size));
+ Node::OutOfLineInputs* outline =
+ reinterpret_cast<OutOfLineInputs*>(raw_buffer + capacity * sizeof(Use));
+ outline->capacity_ = capacity;
+ outline->count_ = 0;
+ return outline;
}
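+// The single zone allocation above is laid out as
+//   |Use #capacity-1|...|Use #1|Use #0|OutOfLineInputs|input slots...|
+// so the returned pointer sits {capacity * sizeof(Use)} bytes into the
+// buffer. A worked instance of the arithmetic (a sketch, not extra API):
+// for capacity == 2,
+//   reinterpret_cast<intptr_t>(outline) == raw_buffer + 2 * sizeof(Use)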
-Node* Node::New(Graph* graph, int input_count, Node** inputs,
- bool has_extensible_inputs) {
- size_t node_size = sizeof(Node);
- int reserve_input_count = has_extensible_inputs ? kDefaultReservedInputs : 0;
- size_t inputs_size = (input_count + reserve_input_count) * sizeof(Input);
- size_t uses_size = input_count * sizeof(Use);
- int size = static_cast<int>(node_size + inputs_size + uses_size);
- Zone* zone = graph->zone();
- void* buffer = zone->New(size);
- Node* result =
- new (buffer) Node(graph->NextNodeID(), input_count, reserve_input_count);
- Input* input =
- reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
- Use* use =
- reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);
+void Node::OutOfLineInputs::ExtractFrom(Use* old_use_ptr, Node** old_input_ptr,
+ int count) {
+ // Extract the inputs from the old use and input pointers and copy them
+  // to this out-of-line storage.
+ Use* new_use_ptr = reinterpret_cast<Use*>(this) - 1;
+ Node** new_input_ptr = inputs_;
+ for (int current = 0; current < count; current++) {
+ new_use_ptr->bit_field_ =
+ Use::InputIndexField::encode(current) | Use::InlineField::encode(false);
+ DCHECK_EQ(old_input_ptr, old_use_ptr->input_ptr());
+ DCHECK_EQ(new_input_ptr, new_use_ptr->input_ptr());
+ Node* old_to = *old_input_ptr;
+ if (old_to) {
+ *old_input_ptr = nullptr;
+ old_to->RemoveUse(old_use_ptr);
+ *new_input_ptr = old_to;
+ old_to->AppendUse(new_use_ptr);
+ } else {
+ *new_input_ptr = nullptr;
+ }
+ old_input_ptr++;
+ new_input_ptr++;
+ old_use_ptr--;
+ new_use_ptr--;
+ }
+ this->count_ = count;
+}
+
+Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
+ Node* const* inputs, bool has_extensible_inputs) {
+ Node** input_ptr;
+ Use* use_ptr;
+ Node* node;
+ bool is_inline;
+
+#ifdef DEBUG
+ // Verify that none of the inputs are {nullptr}.
+ for (int i = 0; i < input_count; i++) {
+ if (inputs[i] == nullptr) {
+ V8_Fatal(__FILE__, __LINE__, "Node::New() Error: #%d:%s[%d] is nullptr",
+ static_cast<int>(id), op->mnemonic(), i);
+ }
+ }
+#endif
+
+ if (input_count > kMaxInlineCapacity) {
+ // Allocate out-of-line inputs.
+ int capacity =
+ has_extensible_inputs ? input_count + kMaxInlineCapacity : input_count;
+ OutOfLineInputs* outline = OutOfLineInputs::New(zone, capacity);
+
+ // Allocate node.
+ void* node_buffer = zone->New(sizeof(Node));
+ node = new (node_buffer) Node(id, op, kOutlineMarker, 0);
+ node->inputs_.outline_ = outline;
+
+ outline->node_ = node;
+ outline->count_ = input_count;
+
+ input_ptr = outline->inputs_;
+ use_ptr = reinterpret_cast<Use*>(outline);
+ is_inline = false;
+ } else {
+ // Allocate node with inline inputs.
+ int capacity = input_count;
+ if (has_extensible_inputs) {
+ const int max = kMaxInlineCapacity;
+ capacity = std::min(input_count + 3, max);
+ }
+
+ size_t size = sizeof(Node) + capacity * (sizeof(Node*) + sizeof(Use));
+ intptr_t raw_buffer = reinterpret_cast<intptr_t>(zone->New(size));
+ void* node_buffer =
+ reinterpret_cast<void*>(raw_buffer + capacity * sizeof(Use));
+
+ node = new (node_buffer) Node(id, op, input_count, capacity);
+ input_ptr = node->inputs_.inline_;
+ use_ptr = reinterpret_cast<Use*>(node);
+ is_inline = true;
+ }
+
+ // Initialize the input pointers and the uses.
for (int current = 0; current < input_count; ++current) {
Node* to = *inputs++;
- input->to = to;
- input->use = use;
- use->input_index = current;
- use->from = result;
+ input_ptr[current] = to;
+ Use* use = use_ptr - 1 - current;
+ use->bit_field_ = Use::InputIndexField::encode(current) |
+ Use::InlineField::encode(is_inline);
to->AppendUse(use);
- ++use;
- ++input;
}
- return result;
+ node->Verify();
+ return node;
+}
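+// In practice nodes are created through the Graph, which supplies the zone
+// and a fresh id (a sketch, assuming existing nodes {left} and {right} and
+// an operator {op} from one of the operator builders):
+//   Node* inputs[] = {left, right};
+//   Node* node = graph->NewNode(op, 2, inputs);  // forwards to Node::New()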
+
+
+Node* Node::Clone(Zone* zone, NodeId id, const Node* node) {
+ int const input_count = node->InputCount();
+ Node* const* const inputs = node->has_inline_inputs()
+ ? node->inputs_.inline_
+ : node->inputs_.outline_->inputs_;
+ Node* const clone = New(zone, id, node->op(), input_count, inputs, false);
+ clone->set_type(node->type());
+ return clone;
}
void Node::Kill() {
DCHECK_NOT_NULL(op());
- RemoveAllInputs();
+ NullAllInputs();
DCHECK(uses().empty());
}
-void Node::CollectProjections(NodeVector* projections) {
- for (size_t i = 0; i < projections->size(); i++) {
- (*projections)[i] = NULL;
+void Node::AppendInput(Zone* zone, Node* new_to) {
+ DCHECK_NOT_NULL(zone);
+ DCHECK_NOT_NULL(new_to);
+
+ int inline_count = InlineCountField::decode(bit_field_);
+ int inline_capacity = InlineCapacityField::decode(bit_field_);
+ if (inline_count < inline_capacity) {
+ // Append inline input.
+ bit_field_ = InlineCountField::update(bit_field_, inline_count + 1);
+ *GetInputPtr(inline_count) = new_to;
+ Use* use = GetUsePtr(inline_count);
+ use->bit_field_ = Use::InputIndexField::encode(inline_count) |
+ Use::InlineField::encode(true);
+ new_to->AppendUse(use);
+ } else {
+ // Append out-of-line input.
+ int input_count = InputCount();
+ OutOfLineInputs* outline = nullptr;
+ if (inline_count != kOutlineMarker) {
+      // Switch to out-of-line inputs.
+ outline = OutOfLineInputs::New(zone, input_count * 2 + 3);
+ outline->node_ = this;
+ outline->ExtractFrom(GetUsePtr(0), GetInputPtr(0), input_count);
+ bit_field_ = InlineCountField::update(bit_field_, kOutlineMarker);
+ inputs_.outline_ = outline;
+ } else {
+      // Use the current out-of-line inputs.
+ outline = inputs_.outline_;
+ if (input_count >= outline->capacity_) {
+        // Out of space in the out-of-line inputs.
+ outline = OutOfLineInputs::New(zone, input_count * 2 + 3);
+ outline->node_ = this;
+ outline->ExtractFrom(GetUsePtr(0), GetInputPtr(0), input_count);
+ inputs_.outline_ = outline;
+ }
+ }
+ outline->count_++;
+ *GetInputPtr(input_count) = new_to;
+ Use* use = GetUsePtr(input_count);
+ use->bit_field_ = Use::InputIndexField::encode(input_count) |
+ Use::InlineField::encode(false);
+ new_to->AppendUse(use);
}
- for (UseIter i = uses().begin(); i != uses().end(); ++i) {
- if ((*i)->opcode() != IrOpcode::kProjection) continue;
- size_t index = OpParameter<size_t>(*i);
- DCHECK_LT(index, projections->size());
- DCHECK_EQ(NULL, (*projections)[index]);
- (*projections)[index] = *i;
- }
+ Verify();
}
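+// Growth sketch: appending a 5th input to a node whose inline capacity of 4
+// is full switches it to out-of-line storage with
+//   input_count * 2 + 3 == 4 * 2 + 3 == 11
+// slots, so the copying done by ExtractFrom() is amortized over later
+// appends.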
-Node* Node::FindProjection(size_t projection_index) {
- for (UseIter i = uses().begin(); i != uses().end(); ++i) {
- if ((*i)->opcode() == IrOpcode::kProjection &&
- OpParameter<size_t>(*i) == projection_index) {
- return *i;
- }
+void Node::InsertInput(Zone* zone, int index, Node* new_to) {
+ DCHECK_NOT_NULL(zone);
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, InputCount());
+ AppendInput(zone, InputAt(InputCount() - 1));
+ for (int i = InputCount() - 1; i > index; --i) {
+ ReplaceInput(i, InputAt(i - 1));
}
- return NULL;
+ ReplaceInput(index, new_to);
+ Verify();
+}
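+// Worked example of the rotation above for InsertInput(zone, 1, X) on a node
+// with inputs [A, B, C]:
+//   after AppendInput:   [A, B, C, C]
+//   after the shift:     [A, B, B, C]
+//   after ReplaceInput:  [A, X, B, C]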
+
+
+void Node::RemoveInput(int index) {
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, InputCount());
+ for (; index < InputCount() - 1; ++index) {
+ ReplaceInput(index, InputAt(index + 1));
+ }
+ TrimInputCount(InputCount() - 1);
+ Verify();
+}
+
+
+void Node::ClearInputs(int start, int count) {
+ Node** input_ptr = GetInputPtr(start);
+ Use* use_ptr = GetUsePtr(start);
+ while (count-- > 0) {
+ DCHECK_EQ(input_ptr, use_ptr->input_ptr());
+ Node* input = *input_ptr;
+ *input_ptr = nullptr;
+ if (input) input->RemoveUse(use_ptr);
+ input_ptr++;
+ use_ptr--;
+ }
+ Verify();
+}
+
+
+void Node::NullAllInputs() { ClearInputs(0, InputCount()); }
+
+
+void Node::TrimInputCount(int new_input_count) {
+ int current_count = InputCount();
+ DCHECK_LE(new_input_count, current_count);
+ if (new_input_count == current_count) return; // Nothing to do.
+ ClearInputs(new_input_count, current_count - new_input_count);
+ if (has_inline_inputs()) {
+ bit_field_ = InlineCountField::update(bit_field_, new_input_count);
+ } else {
+ inputs_.outline_->count_ = new_input_count;
+ }
}
@@ -93,17 +245,124 @@
}
-Node* Node::UseAt(int index) const {
- DCHECK_LE(0, index);
- DCHECK_LT(index, UseCount());
- Use* current = first_use_;
- while (index-- != 0) {
- current = current->next;
+void Node::ReplaceUses(Node* that) {
+ DCHECK(this->first_use_ == nullptr || this->first_use_->prev == nullptr);
+ DCHECK(that->first_use_ == nullptr || that->first_use_->prev == nullptr);
+
+ // Update the pointers to {this} to point to {that}.
+ Use* last_use = nullptr;
+ for (Use* use = this->first_use_; use; use = use->next) {
+ *use->input_ptr() = that;
+ last_use = use;
}
- return current->from;
+ if (last_use) {
+ // Concat the use list of {this} and {that}.
+ last_use->next = that->first_use_;
+ if (that->first_use_) that->first_use_->prev = last_use;
+ that->first_use_ = this->first_use_;
+ }
+ first_use_ = nullptr;
}
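+// Illustrative use (hypothetical nodes): if {a} and {b} compute the same
+// value, all consumers of {a} can be redirected in one splice:
+//   a->ReplaceUses(b);
+//   DCHECK(a->uses().empty());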
+bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
+ unsigned mask = 0;
+ for (Use* use = first_use_; use; use = use->next) {
+ Node* from = use->from();
+ if (from == owner1) {
+ mask |= 1;
+ } else if (from == owner2) {
+ mask |= 2;
+ } else {
+ return false;
+ }
+ }
+ return mask == 3;
+}
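+// The mask accumulates which owners have been seen: bit 0 for {owner1} and
+// bit 1 for {owner2}; any other user returns early. A result of 3 therefore
+// means "used by both owners and nobody else", e.g. a Branch that is owned
+// by exactly its IfTrue and IfFalse projections.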
+
+
+void Node::Print() const {
+ OFStream os(stdout);
+ os << *this << std::endl;
+}
+
+
+Node::Node(NodeId id, const Operator* op, int inline_count, int inline_capacity)
+ : op_(op),
+ type_(nullptr),
+ mark_(0),
+ bit_field_(IdField::encode(id) | InlineCountField::encode(inline_count) |
+ InlineCapacityField::encode(inline_capacity)),
+ first_use_(nullptr) {
+ // Inputs must either be out of line or within the inline capacity.
+ DCHECK(inline_capacity <= kMaxInlineCapacity);
+ DCHECK(inline_count == kOutlineMarker || inline_count <= inline_capacity);
+}
+
+
+void Node::AppendUse(Use* use) {
+ DCHECK(first_use_ == nullptr || first_use_->prev == nullptr);
+ DCHECK_EQ(this, *use->input_ptr());
+ use->next = first_use_;
+ use->prev = nullptr;
+ if (first_use_) first_use_->prev = use;
+ first_use_ = use;
+}
+
+
+void Node::RemoveUse(Use* use) {
+ DCHECK(first_use_ == nullptr || first_use_->prev == nullptr);
+ if (use->prev) {
+ DCHECK_NE(first_use_, use);
+ use->prev->next = use->next;
+ } else {
+ DCHECK_EQ(first_use_, use);
+ first_use_ = use->next;
+ }
+ if (use->next) {
+ use->next->prev = use->prev;
+ }
+}
+
+
+#ifdef DEBUG
+void Node::Verify() {
+ // Check basic sanity of input data structures.
+ fflush(stdout);
+ int count = this->InputCount();
+ // Avoid quadratic explosion for mega nodes; only verify if the input
+  // count is at most 200 or is a multiple of 100.
+ if (count > 200 && count % 100) return;
+
+ for (int i = 0; i < count; i++) {
+ CHECK_EQ(i, this->GetUsePtr(i)->input_index());
+ CHECK_EQ(this->GetInputPtr(i), this->GetUsePtr(i)->input_ptr());
+ CHECK_EQ(count, this->InputCount());
+ }
+ { // Direct input iteration.
+ int index = 0;
+ for (Node* input : this->inputs()) {
+ CHECK_EQ(this->InputAt(index), input);
+ index++;
+ }
+ CHECK_EQ(count, index);
+ CHECK_EQ(this->InputCount(), index);
+ }
+ { // Input edge iteration.
+ int index = 0;
+ for (Edge edge : this->input_edges()) {
+ CHECK_EQ(edge.from(), this);
+ CHECK_EQ(index, edge.index());
+ CHECK_EQ(this->InputAt(index), edge.to());
+ index++;
+ }
+ CHECK_EQ(count, index);
+ CHECK_EQ(this->InputCount(), index);
+ }
+}
+#endif
+
+
std::ostream& operator<<(std::ostream& os, const Node& n) {
os << n.id() << ": " << *n.op();
if (n.InputCount() > 0) {
@@ -117,6 +376,46 @@
return os;
}
+
+Node::InputEdges::iterator Node::InputEdges::iterator::operator++(int n) {
+ iterator result(*this);
+ ++(*this);
+ return result;
+}
+
+
+bool Node::InputEdges::empty() const { return begin() == end(); }
+
+
+Node::Inputs::const_iterator Node::Inputs::const_iterator::operator++(int n) {
+ const_iterator result(*this);
+ ++(*this);
+ return result;
+}
+
+
+bool Node::Inputs::empty() const { return begin() == end(); }
+
+
+Node::UseEdges::iterator Node::UseEdges::iterator::operator++(int n) {
+ iterator result(*this);
+ ++(*this);
+ return result;
+}
+
+
+bool Node::UseEdges::empty() const { return begin() == end(); }
+
+
+Node::Uses::const_iterator Node::Uses::const_iterator::operator++(int n) {
+ const_iterator result(*this);
+ ++(*this);
+ return result;
+}
+
+
+bool Node::Uses::empty() const { return begin() == end(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/node.h b/src/compiler/node.h
index 2295b7b..c73482f 100644
--- a/src/compiler/node.h
+++ b/src/compiler/node.h
@@ -5,17 +5,9 @@
#ifndef V8_COMPILER_NODE_H_
#define V8_COMPILER_NODE_H_
-#include <deque>
-#include <set>
-#include <vector>
-
-#include "src/v8.h"
-
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/types.h"
-#include "src/zone.h"
-#include "src/zone-allocator.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -32,9 +24,11 @@
// {NodeMarker} has a range of values that indicate states of a node.
typedef uint32_t Mark;
+
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
-typedef int NodeId;
+typedef uint32_t NodeId;
+
// A Node is the basic primitive of graphs. Nodes are chained together by
// input/use chains but by default otherwise contain only an identifying number
@@ -45,50 +39,82 @@
// compilation, e.g. during lowering passes. Other information that needs to be
// associated with Nodes during compilation must be stored out-of-line indexed
// by the Node's id.
-class Node FINAL {
+class Node final {
public:
- void Initialize(const Operator* op) {
- set_op(op);
- set_mark(0);
- }
+ static Node* New(Zone* zone, NodeId id, const Operator* op, int input_count,
+ Node* const* inputs, bool has_extensible_inputs);
+ static Node* Clone(Zone* zone, NodeId id, const Node* node);
- bool IsDead() const { return InputCount() > 0 && InputAt(0) == NULL; }
+ bool IsDead() const { return InputCount() > 0 && !InputAt(0); }
void Kill();
- void CollectProjections(ZoneVector<Node*>* projections);
- Node* FindProjection(size_t projection_index);
-
const Operator* op() const { return op_; }
- void set_op(const Operator* op) { op_ = op; }
IrOpcode::Value opcode() const {
DCHECK(op_->opcode() <= IrOpcode::kLast);
return static_cast<IrOpcode::Value>(op_->opcode());
}
- NodeId id() const { return id_; }
+ NodeId id() const { return IdField::decode(bit_field_); }
- int InputCount() const { return input_count(); }
- Node* InputAt(int index) const { return GetInputRecordPtr(index)->to; }
- inline void ReplaceInput(int index, Node* new_input);
- inline void AppendInput(Zone* zone, Node* new_input);
- inline void InsertInput(Zone* zone, int index, Node* new_input);
- inline void RemoveInput(int index);
+ int InputCount() const {
+ return has_inline_inputs() ? InlineCountField::decode(bit_field_)
+ : inputs_.outline_->count_;
+ }
+
+#ifdef DEBUG
+ void Verify();
+#define BOUNDS_CHECK(index) \
+ do { \
+ if (index < 0 || index >= InputCount()) { \
+ V8_Fatal(__FILE__, __LINE__, "Node #%d:%s->InputAt(%d) out of bounds", \
+ id(), op()->mnemonic(), index); \
+ } \
+ } while (false)
+#else
+ // No bounds checks or verification in release mode.
+ inline void Verify() {}
+#define BOUNDS_CHECK(index) \
+ do { \
+ } while (false)
+#endif
+
+ Node* InputAt(int index) const {
+ BOUNDS_CHECK(index);
+ return *GetInputPtrConst(index);
+ }
+
+ void ReplaceInput(int index, Node* new_to) {
+ BOUNDS_CHECK(index);
+ Node** input_ptr = GetInputPtr(index);
+ Node* old_to = *input_ptr;
+ if (old_to != new_to) {
+ Use* use = GetUsePtr(index);
+ if (old_to) old_to->RemoveUse(use);
+ *input_ptr = new_to;
+ if (new_to) new_to->AppendUse(use);
+ }
+ }
+
+#undef BOUNDS_CHECK
+
+ void AppendInput(Zone* zone, Node* new_to);
+ void InsertInput(Zone* zone, int index, Node* new_to);
+ void RemoveInput(int index);
+ void NullAllInputs();
+ void TrimInputCount(int new_input_count);
int UseCount() const;
- Node* UseAt(int index) const;
- inline void ReplaceUses(Node* replace_to);
- template <class UnaryPredicate>
- inline void ReplaceUsesIf(UnaryPredicate pred, Node* replace_to);
- inline void RemoveAllInputs();
+ void ReplaceUses(Node* replace_to);
- inline void TrimInputCount(int input_count);
-
- class InputEdges {
+ class InputEdges final {
public:
+ typedef Edge value_type;
+
class iterator;
- iterator begin() const;
- iterator end() const;
+ inline iterator begin() const;
+ inline iterator end() const;
+
bool empty() const;
explicit InputEdges(Node* node) : node_(node) {}
@@ -97,11 +123,16 @@
Node* node_;
};
- class Inputs {
+ InputEdges input_edges() { return InputEdges(this); }
+
+ class Inputs final {
public:
- class iterator;
- iterator begin() const;
- iterator end() const;
+ typedef Node* value_type;
+
+ class const_iterator;
+ inline const_iterator begin() const;
+ inline const_iterator end() const;
+
bool empty() const;
explicit Inputs(Node* node) : node_(node) {}
@@ -111,13 +142,15 @@
};
Inputs inputs() { return Inputs(this); }
- InputEdges input_edges() { return InputEdges(this); }
- class UseEdges {
+ class UseEdges final {
public:
+ typedef Edge value_type;
+
class iterator;
- iterator begin() const;
- iterator end() const;
+ inline iterator begin() const;
+ inline iterator end() const;
+
bool empty() const;
explicit UseEdges(Node* node) : node_(node) {}
@@ -126,11 +159,16 @@
Node* node_;
};
- class Uses {
+ UseEdges use_edges() { return UseEdges(this); }
+
+ class Uses final {
public:
- class iterator;
- iterator begin() const;
- iterator end() const;
+ typedef Node* value_type;
+
+ class const_iterator;
+ inline const_iterator begin() const;
+ inline const_iterator end() const;
+
bool empty() const;
explicit Uses(Node* node) : node_(node) {}
@@ -140,324 +178,164 @@
};
Uses uses() { return Uses(this); }
- UseEdges use_edges() { return UseEdges(this); }
- bool OwnedBy(Node* owner) const;
-
- static Node* New(Graph* graph, int input_count, Node** inputs,
- bool has_extensible_inputs);
-
- protected:
- friend class Graph;
- friend class Edge;
-
- class Use : public ZoneObject {
- public:
- Node* from;
- Use* next;
- Use* prev;
- int input_index;
- };
-
- class Input {
- public:
- Node* to;
- Use* use;
-
- void Update(Node* new_to);
- };
-
- void EnsureAppendableInputs(Zone* zone);
-
- Input* GetInputRecordPtr(int index) const {
- if (has_appendable_inputs()) {
- return &((*inputs_.appendable_)[index]);
- } else {
- return &inputs_.static_[index];
- }
+  // Returns true if {owner} is the only user of {this} node.
+ bool OwnedBy(Node* owner) const {
+ return first_use_ && first_use_->from() == owner && !first_use_->next;
}
- inline void AppendUse(Use* use);
- inline void RemoveUse(Use* use);
+ // Returns true if {owner1} and {owner2} are the only users of {this} node.
+ bool OwnedBy(Node const* owner1, Node const* owner2) const;
+ void Print() const;
+
+ private:
+ struct Use;
+  // Out-of-line storage for inputs when the number of inputs overflows the
+  // capacity of the inline-allocated space.
+ struct OutOfLineInputs {
+ Node* node_;
+ int count_;
+ int capacity_;
+ Node* inputs_[1];
+
+ static OutOfLineInputs* New(Zone* zone, int capacity);
+ void ExtractFrom(Use* use_ptr, Node** input_ptr, int count);
+ };
+
+ // A link in the use chain for a node. Every input {i} to a node {n} has an
+ // associated {Use} which is linked into the use chain of the {i} node.
+ struct Use {
+ Use* next;
+ Use* prev;
+ uint32_t bit_field_;
+
+ int input_index() const { return InputIndexField::decode(bit_field_); }
+ bool is_inline_use() const { return InlineField::decode(bit_field_); }
+ Node** input_ptr() {
+ int index = input_index();
+ Use* start = this + 1 + index;
+ Node** inputs = is_inline_use()
+ ? reinterpret_cast<Node*>(start)->inputs_.inline_
+ : reinterpret_cast<OutOfLineInputs*>(start)->inputs_;
+ return &inputs[index];
+ }
+
+ Node* from() {
+ Use* start = this + 1 + input_index();
+ return is_inline_use() ? reinterpret_cast<Node*>(start)
+ : reinterpret_cast<OutOfLineInputs*>(start)->node_;
+ }
+
+ typedef BitField<bool, 0, 1> InlineField;
+ typedef BitField<unsigned, 1, 17> InputIndexField;
+ // Leaving some space in the bitset in case we ever decide to record
+ // the output index.
+ };
+
+ //============================================================================
+ //== Memory layout ===========================================================
+ //============================================================================
+ // Saving space for big graphs is important. We use a memory layout trick to
+ // be able to map {Node} objects to {Use} objects and vice-versa in a
+ // space-efficient manner.
+ //
+ // {Use} links are laid out in memory directly before a {Node}, followed by
+ // direct pointers to input {Nodes}.
+ //
+ // inline case:
+ // |Use #N |Use #N-1|...|Use #1 |Use #0 |Node xxxx |I#0|I#1|...|I#N-1|I#N|
+ // ^ ^ ^
+ // + Use + Node + Input
+ //
+ // Since every {Use} instance records its {input_index}, pointer arithmetic
+ // can compute the {Node}.
+ //
+ // out-of-line case:
+ // |Node xxxx |
+ // ^ + outline ------------------+
+ // +----------------------------------------+
+ // | |
+ // v | node
+ // |Use #N |Use #N-1|...|Use #1 |Use #0 |OOL xxxxx |I#0|I#1|...|I#N-1|I#N|
+ // ^ ^
+ // + Use + Input
+ //
+ // Out-of-line storage of input lists is needed if appending an input to
+ // a node exceeds the maximum inline capacity.
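+  //
+  // Worked sketch of the inline mapping (pointer arithmetic only, no extra
+  // API): the {Use} for input index {i} lives at offset -(1 + i), in units
+  // of sizeof(Use), from the {Node}, so
+  //   Use*  use  = reinterpret_cast<Use*>(node) - 1 - i;   // GetUsePtr(i)
+  //   Node* node = reinterpret_cast<Node*>(use + 1 + use->input_index());
+  // recover one from the other; see Use::from() above and GetUsePtr() below.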
+
+ Node(NodeId id, const Operator* op, int inline_count, int inline_capacity);
+
+ Node* const* GetInputPtrConst(int input_index) const {
+ return has_inline_inputs() ? &(inputs_.inline_[input_index])
+ : &inputs_.outline_->inputs_[input_index];
+ }
+ Node** GetInputPtr(int input_index) {
+ return has_inline_inputs() ? &(inputs_.inline_[input_index])
+ : &inputs_.outline_->inputs_[input_index];
+ }
+ Use* GetUsePtr(int input_index) {
+ Use* ptr = has_inline_inputs() ? reinterpret_cast<Use*>(this)
+ : reinterpret_cast<Use*>(inputs_.outline_);
+ return &ptr[-1 - input_index];
+ }
+
+ void AppendUse(Use* use);
+ void RemoveUse(Use* use);
void* operator new(size_t, void* location) { return location; }
- private:
- inline Node(NodeId id, int input_count, int reserve_input_count);
+ // Only NodeProperties should manipulate the op.
+ void set_op(const Operator* op) { op_ = op; }
- typedef ZoneDeque<Input> InputDeque;
-
- friend class NodeProperties;
- template <typename State>
- friend class NodeMarker;
-
- // Only NodeProperties should manipulate the bounds.
- Bounds bounds() { return bounds_; }
- void set_bounds(Bounds b) { bounds_ = b; }
+ // Only NodeProperties should manipulate the type.
+ Type* type() const { return type_; }
+ void set_type(Type* type) { type_ = type; }
// Only NodeMarkers should manipulate the marks on nodes.
Mark mark() { return mark_; }
void set_mark(Mark mark) { mark_ = mark; }
- int input_count() const { return InputCountField::decode(bit_field_); }
- void set_input_count(int input_count) {
- DCHECK_LE(0, input_count);
- bit_field_ = InputCountField::update(bit_field_, input_count);
+ inline bool has_inline_inputs() const {
+ return InlineCountField::decode(bit_field_) != kOutlineMarker;
}
- int reserved_input_count() const {
- return ReservedInputCountField::decode(bit_field_);
- }
- void set_reserved_input_count(int reserved_input_count) {
- DCHECK_LE(0, reserved_input_count);
- bit_field_ =
- ReservedInputCountField::update(bit_field_, reserved_input_count);
- }
+ void ClearInputs(int start, int count);
- bool has_appendable_inputs() const {
- return HasAppendableInputsField::decode(bit_field_);
- }
- void set_has_appendable_inputs(bool has_appendable_inputs) {
- bit_field_ =
- HasAppendableInputsField::update(bit_field_, has_appendable_inputs);
- }
-
- typedef BitField<unsigned, 0, 29> InputCountField;
- typedef BitField<unsigned, 29, 2> ReservedInputCountField;
- typedef BitField<unsigned, 31, 1> HasAppendableInputsField;
- static const int kDefaultReservedInputs = ReservedInputCountField::kMax;
+ typedef BitField<NodeId, 0, 24> IdField;
+ typedef BitField<unsigned, 24, 4> InlineCountField;
+ typedef BitField<unsigned, 28, 4> InlineCapacityField;
+ static const int kOutlineMarker = InlineCountField::kMax;
+ static const int kMaxInlineCount = InlineCountField::kMax - 1;
+ static const int kMaxInlineCapacity = InlineCapacityField::kMax - 1;
const Operator* op_;
- Bounds bounds_;
+ Type* type_;
Mark mark_;
- NodeId id_;
- unsigned bit_field_;
- union {
- // When a node is initially allocated, it uses a static buffer to hold its
- // inputs under the assumption that the number of outputs will not increase.
- // When the first input is appended, the static buffer is converted into a
- // deque to allow for space-efficient growing.
- Input* static_;
- InputDeque* appendable_;
- } inputs_;
+ uint32_t bit_field_;
Use* first_use_;
- Use* last_use_;
+ union {
+ // Inline storage for inputs or out-of-line storage.
+ Node* inline_[1];
+ OutOfLineInputs* outline_;
+ } inputs_;
+
+ friend class Edge;
+ friend class NodeMarkerBase;
+ friend class NodeProperties;
DISALLOW_COPY_AND_ASSIGN(Node);
};
-// An encapsulation for information associated with a single use of node as a
-// input from another node, allowing access to both the defining node and
-// the node having the input.
-class Edge {
- public:
- Node* from() const { return input_->use->from; }
- Node* to() const { return input_->to; }
- int index() const {
- int index = input_->use->input_index;
- DCHECK(index < input_->use->from->input_count());
- return index;
- }
-
- bool operator==(const Edge& other) { return input_ == other.input_; }
- bool operator!=(const Edge& other) { return !(*this == other); }
-
- void UpdateTo(Node* new_to) { input_->Update(new_to); }
-
- private:
- friend class Node::Uses::iterator;
- friend class Node::Inputs::iterator;
- friend class Node::UseEdges::iterator;
- friend class Node::InputEdges::iterator;
-
- explicit Edge(Node::Input* input) : input_(input) {}
-
- Node::Input* input_;
-};
-
-
-// A forward iterator to visit the edges for the input dependencies of a node..
-class Node::InputEdges::iterator {
- public:
- typedef std::forward_iterator_tag iterator_category;
- typedef int difference_type;
- typedef Edge value_type;
- typedef Edge* pointer;
- typedef Edge& reference;
- iterator(const Node::InputEdges::iterator& other) // NOLINT
- : input_(other.input_) {}
- iterator() : input_(NULL) {}
-
- Edge operator*() const { return Edge(input_); }
- bool operator==(const iterator& other) const { return Equals(other); }
- bool operator!=(const iterator& other) const { return !Equals(other); }
- iterator& operator++() {
- DCHECK(input_ != NULL);
- Edge edge(input_);
- Node* from = edge.from();
- SetInput(from, input_->use->input_index + 1);
- return *this;
- }
- iterator operator++(int) {
- iterator result(*this);
- ++(*this);
- return result;
- }
-
- private:
- friend class Node;
-
- explicit iterator(Node* from, int index = 0) : input_(NULL) {
- SetInput(from, index);
- }
-
- bool Equals(const iterator& other) const { return other.input_ == input_; }
- void SetInput(Node* from, int index) {
- DCHECK(index >= 0 && index <= from->InputCount());
- if (index < from->InputCount()) {
- input_ = from->GetInputRecordPtr(index);
- } else {
- input_ = NULL;
- }
- }
-
- Input* input_;
-};
-
-
-// A forward iterator to visit the inputs of a node.
-class Node::Inputs::iterator {
- public:
- typedef std::forward_iterator_tag iterator_category;
- typedef int difference_type;
- typedef Node* value_type;
- typedef Node** pointer;
- typedef Node*& reference;
-
- iterator(const Node::Inputs::iterator& other) // NOLINT
- : iter_(other.iter_) {}
-
- Node* operator*() const { return (*iter_).to(); }
- bool operator==(const iterator& other) const { return Equals(other); }
- bool operator!=(const iterator& other) const { return !Equals(other); }
- iterator& operator++() {
- ++iter_;
- return *this;
- }
- iterator operator++(int) {
- iterator result(*this);
- ++(*this);
- return result;
- }
-
-
- private:
- friend class Node::Inputs;
-
- explicit iterator(Node* node, int index) : iter_(node, index) {}
-
- bool Equals(const iterator& other) const { return other.iter_ == iter_; }
-
- Node::InputEdges::iterator iter_;
-};
-
-// A forward iterator to visit the uses edges of a node. The edges are returned
-// in
-// the order in which they were added as inputs.
-class Node::UseEdges::iterator {
- public:
- iterator(const Node::UseEdges::iterator& other) // NOLINT
- : current_(other.current_),
- next_(other.next_) {}
-
- Edge operator*() const { return Edge(CurrentInput()); }
-
- bool operator==(const iterator& other) { return Equals(other); }
- bool operator!=(const iterator& other) { return !Equals(other); }
- iterator& operator++() {
- DCHECK(current_ != NULL);
- current_ = next_;
- next_ = (current_ == NULL) ? NULL : current_->next;
- return *this;
- }
- iterator operator++(int) {
- iterator result(*this);
- ++(*this);
- return result;
- }
-
- private:
- friend class Node::UseEdges;
-
- iterator() : current_(NULL), next_(NULL) {}
- explicit iterator(Node* node)
- : current_(node->first_use_),
- next_(current_ == NULL ? NULL : current_->next) {}
-
- bool Equals(const iterator& other) const {
- return other.current_ == current_;
- }
-
- Input* CurrentInput() const {
- return current_->from->GetInputRecordPtr(current_->input_index);
- }
-
- Node::Use* current_;
- Node::Use* next_;
-};
-
-
-// A forward iterator to visit the uses of a node. The uses are returned in
-// the order in which they were added as inputs.
-class Node::Uses::iterator {
- public:
- iterator(const Node::Uses::iterator& other) // NOLINT
- : current_(other.current_) {}
-
- Node* operator*() { return current_->from; }
-
- bool operator==(const iterator& other) { return other.current_ == current_; }
- bool operator!=(const iterator& other) { return other.current_ != current_; }
- iterator& operator++() {
- DCHECK(current_ != NULL);
- current_ = current_->next;
- return *this;
- }
-
- private:
- friend class Node::Uses;
-
- iterator() : current_(NULL) {}
- explicit iterator(Node* node) : current_(node->first_use_) {}
-
- Input* CurrentInput() const {
- return current_->from->GetInputRecordPtr(current_->input_index);
- }
-
- Node::Use* current_;
-};
-
-
std::ostream& operator<<(std::ostream& os, const Node& n);
-typedef std::set<Node*, std::less<Node*>, zone_allocator<Node*> > NodeSet;
-typedef NodeSet::iterator NodeSetIter;
-typedef NodeSet::reverse_iterator NodeSetRIter;
+// Typedefs to shorten commonly used Node containers.
typedef ZoneDeque<Node*> NodeDeque;
-
+typedef ZoneSet<Node*> NodeSet;
typedef ZoneVector<Node*> NodeVector;
-typedef NodeVector::iterator NodeVectorIter;
-typedef NodeVector::const_iterator NodeVectorConstIter;
-typedef NodeVector::reverse_iterator NodeVectorRIter;
-
typedef ZoneVector<NodeVector> NodeVectorVector;
-typedef NodeVectorVector::iterator NodeVectorVectorIter;
-typedef NodeVectorVector::reverse_iterator NodeVectorVectorRIter;
-typedef Node::Uses::iterator UseIter;
-typedef Node::Inputs::iterator InputIter;
// Helper to extract parameters from Operator1<*> nodes.
template <typename T>
@@ -465,194 +343,219 @@
return OpParameter<T>(node->op());
}
-inline Node::InputEdges::iterator Node::InputEdges::begin() const {
+
+// An encapsulation for information associated with a single use of a node as
+// an input from another node, allowing access to both the defining node and
+// the node having the input.
+class Edge final {
+ public:
+ Node* from() const { return use_->from(); }
+ Node* to() const { return *input_ptr_; }
+ int index() const {
+ int const index = use_->input_index();
+ DCHECK_LT(index, use_->from()->InputCount());
+ return index;
+ }
+
+ bool operator==(const Edge& other) { return input_ptr_ == other.input_ptr_; }
+ bool operator!=(const Edge& other) { return !(*this == other); }
+
+ void UpdateTo(Node* new_to) {
+ Node* old_to = *input_ptr_;
+ if (old_to != new_to) {
+ if (old_to) old_to->RemoveUse(use_);
+ *input_ptr_ = new_to;
+ if (new_to) new_to->AppendUse(use_);
+ }
+ }
+
+ private:
+ friend class Node::UseEdges::iterator;
+ friend class Node::InputEdges::iterator;
+
+ Edge(Node::Use* use, Node** input_ptr) : use_(use), input_ptr_(input_ptr) {
+ DCHECK_NOT_NULL(use);
+ DCHECK_NOT_NULL(input_ptr);
+ DCHECK_EQ(input_ptr, use->input_ptr());
+ }
+
+ Node::Use* use_;
+ Node** input_ptr_;
+};
+
+
+// A forward iterator to visit the edges for the input dependencies of a node.
+class Node::InputEdges::iterator final {
+ public:
+ typedef std::forward_iterator_tag iterator_category;
+ typedef int difference_type;
+ typedef Edge value_type;
+ typedef Edge* pointer;
+ typedef Edge& reference;
+
+ iterator() : use_(nullptr), input_ptr_(nullptr) {}
+ iterator(const iterator& other)
+ : use_(other.use_), input_ptr_(other.input_ptr_) {}
+
+ Edge operator*() const { return Edge(use_, input_ptr_); }
+ bool operator==(const iterator& other) const {
+ return input_ptr_ == other.input_ptr_;
+ }
+ bool operator!=(const iterator& other) const { return !(*this == other); }
+ iterator& operator++() {
+ input_ptr_++;
+ use_--;
+ return *this;
+ }
+ iterator operator++(int);
+
+ private:
+ friend class Node;
+
+ explicit iterator(Node* from, int index = 0)
+ : use_(from->GetUsePtr(index)), input_ptr_(from->GetInputPtr(index)) {}
+
+ Use* use_;
+ Node** input_ptr_;
+};
+
+
+Node::InputEdges::iterator Node::InputEdges::begin() const {
return Node::InputEdges::iterator(this->node_, 0);
}
-inline Node::InputEdges::iterator Node::InputEdges::end() const {
+
+Node::InputEdges::iterator Node::InputEdges::end() const {
return Node::InputEdges::iterator(this->node_, this->node_->InputCount());
}
-inline Node::Inputs::iterator Node::Inputs::begin() const {
- return Node::Inputs::iterator(this->node_, 0);
+
+// A forward iterator to visit the inputs of a node.
+class Node::Inputs::const_iterator final {
+ public:
+ typedef std::forward_iterator_tag iterator_category;
+ typedef int difference_type;
+ typedef Node* value_type;
+ typedef Node** pointer;
+ typedef Node*& reference;
+
+ const_iterator(const const_iterator& other) : iter_(other.iter_) {}
+
+ Node* operator*() const { return (*iter_).to(); }
+ bool operator==(const const_iterator& other) const {
+ return iter_ == other.iter_;
+ }
+ bool operator!=(const const_iterator& other) const {
+ return !(*this == other);
+ }
+ const_iterator& operator++() {
+ ++iter_;
+ return *this;
+ }
+ const_iterator operator++(int);
+
+ private:
+ friend class Node::Inputs;
+
+ const_iterator(Node* node, int index) : iter_(node, index) {}
+
+ Node::InputEdges::iterator iter_;
+};
+
+
+Node::Inputs::const_iterator Node::Inputs::begin() const {
+ return const_iterator(this->node_, 0);
}
-inline Node::Inputs::iterator Node::Inputs::end() const {
- return Node::Inputs::iterator(this->node_, this->node_->InputCount());
+
+Node::Inputs::const_iterator Node::Inputs::end() const {
+ return const_iterator(this->node_, this->node_->InputCount());
}
-inline Node::UseEdges::iterator Node::UseEdges::begin() const {
+
+// A forward iterator to visit the use edges of a node.
+class Node::UseEdges::iterator final {
+ public:
+ iterator(const iterator& other)
+ : current_(other.current_), next_(other.next_) {}
+
+ Edge operator*() const { return Edge(current_, current_->input_ptr()); }
+ bool operator==(const iterator& other) const {
+ return current_ == other.current_;
+ }
+ bool operator!=(const iterator& other) const { return !(*this == other); }
+ iterator& operator++() {
+ DCHECK_NOT_NULL(current_);
+ current_ = next_;
+ next_ = current_ ? current_->next : nullptr;
+ return *this;
+ }
+ iterator operator++(int);
+
+ private:
+ friend class Node::UseEdges;
+
+ iterator() : current_(nullptr), next_(nullptr) {}
+ explicit iterator(Node* node)
+ : current_(node->first_use_),
+ next_(current_ ? current_->next : nullptr) {}
+
+ Node::Use* current_;
+ Node::Use* next_;
+};
+
+
+Node::UseEdges::iterator Node::UseEdges::begin() const {
return Node::UseEdges::iterator(this->node_);
}
-inline Node::UseEdges::iterator Node::UseEdges::end() const {
+
+Node::UseEdges::iterator Node::UseEdges::end() const {
return Node::UseEdges::iterator();
}
-inline Node::Uses::iterator Node::Uses::begin() const {
- return Node::Uses::iterator(this->node_);
+
+// A forward iterator to visit the uses of a node.
+class Node::Uses::const_iterator final {
+ public:
+ typedef std::forward_iterator_tag iterator_category;
+ typedef int difference_type;
+ typedef Node* value_type;
+ typedef Node** pointer;
+ typedef Node*& reference;
+
+ const_iterator(const const_iterator& other) : current_(other.current_) {}
+
+ Node* operator*() const { return current_->from(); }
+ bool operator==(const const_iterator& other) const {
+ return other.current_ == current_;
+ }
+ bool operator!=(const const_iterator& other) const {
+ return other.current_ != current_;
+ }
+ const_iterator& operator++() {
+ DCHECK_NOT_NULL(current_);
+ current_ = current_->next;
+ return *this;
+ }
+ const_iterator operator++(int);
+
+ private:
+ friend class Node::Uses;
+
+ const_iterator() : current_(nullptr) {}
+ explicit const_iterator(Node* node) : current_(node->first_use_) {}
+
+ Node::Use* current_;
+};
+
+
+Node::Uses::const_iterator Node::Uses::begin() const {
+ return const_iterator(this->node_);
}
-inline Node::Uses::iterator Node::Uses::end() const {
- return Node::Uses::iterator();
-}
-inline bool Node::InputEdges::empty() const { return begin() == end(); }
-inline bool Node::Uses::empty() const { return begin() == end(); }
-inline bool Node::UseEdges::empty() const { return begin() == end(); }
-inline bool Node::Inputs::empty() const { return begin() == end(); }
-
-inline void Node::ReplaceUses(Node* replace_to) {
- for (Use* use = first_use_; use != NULL; use = use->next) {
- use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
- }
- if (replace_to->last_use_ == NULL) {
- DCHECK_EQ(NULL, replace_to->first_use_);
- replace_to->first_use_ = first_use_;
- replace_to->last_use_ = last_use_;
- } else if (first_use_ != NULL) {
- DCHECK_NE(NULL, replace_to->first_use_);
- replace_to->last_use_->next = first_use_;
- first_use_->prev = replace_to->last_use_;
- replace_to->last_use_ = last_use_;
- }
- first_use_ = NULL;
- last_use_ = NULL;
-}
-
-template <class UnaryPredicate>
-inline void Node::ReplaceUsesIf(UnaryPredicate pred, Node* replace_to) {
- for (Use* use = first_use_; use != NULL;) {
- Use* next = use->next;
- if (pred(use->from)) {
- RemoveUse(use);
- replace_to->AppendUse(use);
- use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
- }
- use = next;
- }
-}
-
-inline void Node::RemoveAllInputs() {
- for (Edge edge : input_edges()) {
- edge.UpdateTo(NULL);
- }
-}
-
-inline void Node::TrimInputCount(int new_input_count) {
- if (new_input_count == input_count()) return; // Nothing to do.
-
- DCHECK(new_input_count < input_count());
-
- // Update inline inputs.
- for (int i = new_input_count; i < input_count(); i++) {
- Node::Input* input = GetInputRecordPtr(i);
- input->Update(NULL);
- }
- set_input_count(new_input_count);
-}
-
-inline void Node::ReplaceInput(int index, Node* new_to) {
- Input* input = GetInputRecordPtr(index);
- input->Update(new_to);
-}
-
-inline void Node::Input::Update(Node* new_to) {
- Node* old_to = this->to;
- if (new_to == old_to) return; // Nothing to do.
- // Snip out the use from where it used to be
- if (old_to != NULL) {
- old_to->RemoveUse(use);
- }
- to = new_to;
- // And put it into the new node's use list.
- if (new_to != NULL) {
- new_to->AppendUse(use);
- } else {
- use->next = NULL;
- use->prev = NULL;
- }
-}
-
-inline void Node::EnsureAppendableInputs(Zone* zone) {
- if (!has_appendable_inputs()) {
- void* deque_buffer = zone->New(sizeof(InputDeque));
- InputDeque* deque = new (deque_buffer) InputDeque(zone);
- for (int i = 0; i < input_count(); ++i) {
- deque->push_back(inputs_.static_[i]);
- }
- inputs_.appendable_ = deque;
- set_has_appendable_inputs(true);
- }
-}
-
-inline void Node::AppendInput(Zone* zone, Node* to_append) {
- Use* new_use = new (zone) Use;
- Input new_input;
- new_input.to = to_append;
- new_input.use = new_use;
- if (reserved_input_count() > 0) {
- DCHECK(!has_appendable_inputs());
- set_reserved_input_count(reserved_input_count() - 1);
- inputs_.static_[input_count()] = new_input;
- } else {
- EnsureAppendableInputs(zone);
- inputs_.appendable_->push_back(new_input);
- }
- new_use->input_index = input_count();
- new_use->from = this;
- to_append->AppendUse(new_use);
- set_input_count(input_count() + 1);
-}
-
-inline void Node::InsertInput(Zone* zone, int index, Node* to_insert) {
- DCHECK(index >= 0 && index < InputCount());
- // TODO(turbofan): Optimize this implementation!
- AppendInput(zone, InputAt(InputCount() - 1));
- for (int i = InputCount() - 1; i > index; --i) {
- ReplaceInput(i, InputAt(i - 1));
- }
- ReplaceInput(index, to_insert);
-}
-
-inline void Node::RemoveInput(int index) {
- DCHECK(index >= 0 && index < InputCount());
- // TODO(turbofan): Optimize this implementation!
- for (; index < InputCount() - 1; ++index) {
- ReplaceInput(index, InputAt(index + 1));
- }
- TrimInputCount(InputCount() - 1);
-}
-
-inline void Node::AppendUse(Use* use) {
- use->next = NULL;
- use->prev = last_use_;
- if (last_use_ == NULL) {
- first_use_ = use;
- } else {
- last_use_->next = use;
- }
- last_use_ = use;
-}
-
-inline void Node::RemoveUse(Use* use) {
- if (last_use_ == use) {
- last_use_ = use->prev;
- }
- if (use->prev != NULL) {
- use->prev->next = use->next;
- } else {
- first_use_ = use->next;
- }
- if (use->next != NULL) {
- use->next->prev = use->prev;
- }
-}
-
-inline bool Node::OwnedBy(Node* owner) const {
- return first_use_ != NULL && first_use_->from == owner &&
- first_use_->next == NULL;
-}
+Node::Uses::const_iterator Node::Uses::end() const { return const_iterator(); }
} // namespace compiler
} // namespace internal
diff --git a/src/compiler/opcodes.cc b/src/compiler/opcodes.cc
index 044395c..2a8e01a 100644
--- a/src/compiler/opcodes.cc
+++ b/src/compiler/opcodes.cc
@@ -5,6 +5,7 @@
#include "src/compiler/opcodes.h"
#include <algorithm>
+#include <ostream>
#include "src/base/macros.h"
@@ -25,10 +26,15 @@
// static
char const* IrOpcode::Mnemonic(Value value) {
- size_t const n = std::max<size_t>(value, arraysize(kMnemonics) - 1);
+ size_t const n = std::min<size_t>(value, arraysize(kMnemonics) - 1);
return kMnemonics[n];
}
+
+std::ostream& operator<<(std::ostream& os, IrOpcode::Value opcode) {
+ return os << IrOpcode::Mnemonic(opcode);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index d229b6d..a97fdfa 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -5,48 +5,61 @@
#ifndef V8_COMPILER_OPCODES_H_
#define V8_COMPILER_OPCODES_H_
-// Opcodes for control operators.
-#define INNER_CONTROL_OP_LIST(V) \
- V(Dead) \
- V(Loop) \
- V(Branch) \
- V(IfTrue) \
- V(IfFalse) \
- V(Merge) \
- V(Return) \
- V(Terminate) \
- V(Throw)
+#include <iosfwd>
+// Opcodes for control operators.
#define CONTROL_OP_LIST(V) \
- INNER_CONTROL_OP_LIST(V) \
V(Start) \
+ V(Loop) \
+ V(Branch) \
+ V(Switch) \
+ V(IfTrue) \
+ V(IfFalse) \
+ V(IfSuccess) \
+ V(IfException) \
+ V(IfValue) \
+ V(IfDefault) \
+ V(Merge) \
+ V(Deoptimize) \
+ V(Return) \
+ V(TailCall) \
+ V(Terminate) \
+ V(OsrNormalEntry) \
+ V(OsrLoopEntry) \
+ V(Throw) \
V(End)
-// Opcodes for common operators.
-#define LEAF_OP_LIST(V) \
- V(Int32Constant) \
- V(Int64Constant) \
- V(Float32Constant) \
- V(Float64Constant) \
- V(ExternalConstant) \
- V(NumberConstant) \
+// Opcodes for constant operators.
+#define CONSTANT_OP_LIST(V) \
+ V(Int32Constant) \
+ V(Int64Constant) \
+ V(Float32Constant) \
+ V(Float64Constant) \
+ V(ExternalConstant) \
+ V(NumberConstant) \
V(HeapConstant)
#define INNER_OP_LIST(V) \
V(Select) \
V(Phi) \
+ V(EffectSet) \
V(EffectPhi) \
- V(ValueEffect) \
- V(Finish) \
+ V(Guard) \
+ V(BeginRegion) \
+ V(FinishRegion) \
V(FrameState) \
V(StateValues) \
+ V(TypedStateValues) \
+ V(ObjectState) \
V(Call) \
V(Parameter) \
+ V(OsrValue) \
V(Projection)
#define COMMON_OP_LIST(V) \
- LEAF_OP_LIST(V) \
- INNER_OP_LIST(V)
+ CONSTANT_OP_LIST(V) \
+ INNER_OP_LIST(V) \
+ V(Dead)
// Opcodes for JavaScript operators.
#define JS_COMPARE_BINOP_LIST(V) \
@@ -79,8 +92,6 @@
JS_BITWISE_BINOP_LIST(V) \
JS_ARITH_BINOP_LIST(V)
-#define JS_LOGIC_UNOP_LIST(V) V(JSUnaryNot)
-
#define JS_CONVERSION_UNOP_LIST(V) \
V(JSToBoolean) \
V(JSToNumber) \
@@ -92,23 +103,32 @@
V(JSTypeOf)
#define JS_SIMPLE_UNOP_LIST(V) \
- JS_LOGIC_UNOP_LIST(V) \
JS_CONVERSION_UNOP_LIST(V) \
JS_OTHER_UNOP_LIST(V)
-#define JS_OBJECT_OP_LIST(V) \
- V(JSCreate) \
- V(JSLoadProperty) \
- V(JSLoadNamed) \
- V(JSStoreProperty) \
- V(JSStoreNamed) \
- V(JSDeleteProperty) \
- V(JSHasProperty) \
+#define JS_OBJECT_OP_LIST(V) \
+ V(JSCreate) \
+ V(JSCreateArguments) \
+ V(JSCreateArray) \
+ V(JSCreateClosure) \
+ V(JSCreateIterResultObject) \
+ V(JSCreateLiteralArray) \
+ V(JSCreateLiteralObject) \
+ V(JSCreateLiteralRegExp) \
+ V(JSLoadProperty) \
+ V(JSLoadNamed) \
+ V(JSLoadGlobal) \
+ V(JSStoreProperty) \
+ V(JSStoreNamed) \
+ V(JSStoreGlobal) \
+ V(JSDeleteProperty) \
+ V(JSHasProperty) \
V(JSInstanceOf)
#define JS_CONTEXT_OP_LIST(V) \
V(JSLoadContext) \
V(JSStoreContext) \
+ V(JSLoadDynamic) \
V(JSCreateFunctionContext) \
V(JSCreateCatchContext) \
V(JSCreateWithContext) \
@@ -120,8 +140,15 @@
V(JSCallConstruct) \
V(JSCallFunction) \
V(JSCallRuntime) \
+ V(JSConvertReceiver) \
+ V(JSForInDone) \
+ V(JSForInNext) \
+ V(JSForInPrepare) \
+ V(JSForInStep) \
+ V(JSLoadMessage) \
+ V(JSStoreMessage) \
V(JSYield) \
- V(JSDebugger)
+ V(JSStackCheck)
#define JS_OP_LIST(V) \
JS_SIMPLE_BINOP_LIST(V) \
@@ -131,112 +158,170 @@
JS_OTHER_OP_LIST(V)
// Opcodes for virtual-machine-level operators.
-#define SIMPLIFIED_OP_LIST(V) \
- V(AnyToBoolean) \
- V(BooleanNot) \
- V(BooleanToNumber) \
- V(NumberEqual) \
- V(NumberLessThan) \
- V(NumberLessThanOrEqual) \
- V(NumberAdd) \
- V(NumberSubtract) \
- V(NumberMultiply) \
- V(NumberDivide) \
- V(NumberModulus) \
- V(NumberToInt32) \
- V(NumberToUint32) \
- V(ReferenceEqual) \
- V(StringEqual) \
- V(StringLessThan) \
- V(StringLessThanOrEqual) \
- V(StringAdd) \
- V(ChangeTaggedToInt32) \
- V(ChangeTaggedToUint32) \
- V(ChangeTaggedToFloat64) \
- V(ChangeInt32ToTagged) \
- V(ChangeUint32ToTagged) \
- V(ChangeFloat64ToTagged) \
- V(ChangeBoolToBit) \
- V(ChangeBitToBool) \
- V(LoadField) \
- V(LoadBuffer) \
- V(LoadElement) \
- V(StoreField) \
- V(StoreBuffer) \
- V(StoreElement) \
- V(ObjectIsSmi) \
- V(ObjectIsNonNegativeSmi)
+#define SIMPLIFIED_COMPARE_BINOP_LIST(V) \
+ V(NumberEqual) \
+ V(NumberLessThan) \
+ V(NumberLessThanOrEqual) \
+ V(ReferenceEqual) \
+ V(StringEqual) \
+ V(StringLessThan) \
+ V(StringLessThanOrEqual)
+
+#define SIMPLIFIED_OP_LIST(V) \
+ SIMPLIFIED_COMPARE_BINOP_LIST(V) \
+ V(BooleanNot) \
+ V(BooleanToNumber) \
+ V(NumberAdd) \
+ V(NumberSubtract) \
+ V(NumberMultiply) \
+ V(NumberDivide) \
+ V(NumberModulus) \
+ V(NumberBitwiseOr) \
+ V(NumberBitwiseXor) \
+ V(NumberBitwiseAnd) \
+ V(NumberShiftLeft) \
+ V(NumberShiftRight) \
+ V(NumberShiftRightLogical) \
+ V(NumberToInt32) \
+ V(NumberToUint32) \
+ V(NumberIsHoleNaN) \
+ V(PlainPrimitiveToNumber) \
+ V(ChangeTaggedToInt32) \
+ V(ChangeTaggedToUint32) \
+ V(ChangeTaggedToFloat64) \
+ V(ChangeInt32ToTagged) \
+ V(ChangeUint32ToTagged) \
+ V(ChangeFloat64ToTagged) \
+ V(ChangeBoolToBit) \
+ V(ChangeBitToBool) \
+ V(Allocate) \
+ V(LoadField) \
+ V(LoadBuffer) \
+ V(LoadElement) \
+ V(StoreField) \
+ V(StoreBuffer) \
+ V(StoreElement) \
+ V(ObjectIsNumber) \
+ V(ObjectIsSmi)
// Opcodes for Machine-level operators.
-#define MACHINE_OP_LIST(V) \
- V(Load) \
- V(Store) \
- V(Word32And) \
- V(Word32Or) \
- V(Word32Xor) \
- V(Word32Shl) \
- V(Word32Shr) \
- V(Word32Sar) \
- V(Word32Ror) \
- V(Word32Equal) \
- V(Word64And) \
- V(Word64Or) \
- V(Word64Xor) \
- V(Word64Shl) \
- V(Word64Shr) \
- V(Word64Sar) \
- V(Word64Ror) \
- V(Word64Equal) \
- V(Int32Add) \
- V(Int32AddWithOverflow) \
- V(Int32Sub) \
- V(Int32SubWithOverflow) \
- V(Int32Mul) \
- V(Int32MulHigh) \
- V(Int32Div) \
- V(Int32Mod) \
- V(Int32LessThan) \
- V(Int32LessThanOrEqual) \
- V(Uint32Div) \
- V(Uint32LessThan) \
- V(Uint32LessThanOrEqual) \
- V(Uint32Mod) \
- V(Uint32MulHigh) \
- V(Int64Add) \
- V(Int64Sub) \
- V(Int64Mul) \
- V(Int64Div) \
- V(Int64Mod) \
- V(Int64LessThan) \
- V(Int64LessThanOrEqual) \
- V(Uint64Div) \
- V(Uint64LessThan) \
- V(Uint64Mod) \
- V(ChangeFloat32ToFloat64) \
- V(ChangeFloat64ToInt32) \
- V(ChangeFloat64ToUint32) \
- V(ChangeInt32ToFloat64) \
- V(ChangeInt32ToInt64) \
- V(ChangeUint32ToFloat64) \
- V(ChangeUint32ToUint64) \
- V(TruncateFloat64ToFloat32) \
- V(TruncateFloat64ToInt32) \
- V(TruncateInt64ToInt32) \
- V(Float64Add) \
- V(Float64Sub) \
- V(Float64Mul) \
- V(Float64Div) \
- V(Float64Mod) \
- V(Float64Sqrt) \
- V(Float64Equal) \
- V(Float64LessThan) \
- V(Float64LessThanOrEqual) \
- V(Float64Floor) \
- V(Float64Ceil) \
- V(Float64RoundTruncate) \
- V(Float64RoundTiesAway) \
- V(LoadStackPointer) \
- V(CheckedLoad) \
+#define MACHINE_COMPARE_BINOP_LIST(V) \
+ V(Word32Equal) \
+ V(Word64Equal) \
+ V(Int32LessThan) \
+ V(Int32LessThanOrEqual) \
+ V(Uint32LessThan) \
+ V(Uint32LessThanOrEqual) \
+ V(Int64LessThan) \
+ V(Int64LessThanOrEqual) \
+ V(Uint64LessThan) \
+ V(Uint64LessThanOrEqual) \
+ V(Float32Equal) \
+ V(Float32LessThan) \
+ V(Float32LessThanOrEqual) \
+ V(Float64Equal) \
+ V(Float64LessThan) \
+ V(Float64LessThanOrEqual)
+
+#define MACHINE_OP_LIST(V) \
+ MACHINE_COMPARE_BINOP_LIST(V) \
+ V(Load) \
+ V(Store) \
+ V(Word32And) \
+ V(Word32Or) \
+ V(Word32Xor) \
+ V(Word32Shl) \
+ V(Word32Shr) \
+ V(Word32Sar) \
+ V(Word32Ror) \
+ V(Word32Clz) \
+ V(Word32Ctz) \
+ V(Word32Popcnt) \
+ V(Word64Popcnt) \
+ V(Word64And) \
+ V(Word64Or) \
+ V(Word64Xor) \
+ V(Word64Shl) \
+ V(Word64Shr) \
+ V(Word64Sar) \
+ V(Word64Ror) \
+ V(Word64Clz) \
+ V(Word64Ctz) \
+ V(Int32Add) \
+ V(Int32AddWithOverflow) \
+ V(Int32Sub) \
+ V(Int32SubWithOverflow) \
+ V(Int32Mul) \
+ V(Int32MulHigh) \
+ V(Int32Div) \
+ V(Int32Mod) \
+ V(Uint32Div) \
+ V(Uint32Mod) \
+ V(Uint32MulHigh) \
+ V(Int64Add) \
+ V(Int64AddWithOverflow) \
+ V(Int64Sub) \
+ V(Int64SubWithOverflow) \
+ V(Int64Mul) \
+ V(Int64Div) \
+ V(Int64Mod) \
+ V(Uint64Div) \
+ V(Uint64Mod) \
+ V(ChangeFloat32ToFloat64) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
+ V(TryTruncateFloat32ToInt64) \
+ V(TryTruncateFloat64ToInt64) \
+ V(TryTruncateFloat32ToUint64) \
+ V(TryTruncateFloat64ToUint64) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeUint32ToUint64) \
+ V(TruncateFloat64ToFloat32) \
+ V(TruncateFloat64ToInt32) \
+ V(TruncateInt64ToInt32) \
+ V(RoundInt64ToFloat32) \
+ V(RoundInt64ToFloat64) \
+ V(RoundUint64ToFloat32) \
+ V(RoundUint64ToFloat64) \
+ V(BitcastFloat32ToInt32) \
+ V(BitcastFloat64ToInt64) \
+ V(BitcastInt32ToFloat32) \
+ V(BitcastInt64ToFloat64) \
+ V(Float32Add) \
+ V(Float32Sub) \
+ V(Float32Mul) \
+ V(Float32Div) \
+ V(Float32Max) \
+ V(Float32Min) \
+ V(Float32Abs) \
+ V(Float32Sqrt) \
+ V(Float32RoundDown) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Mod) \
+ V(Float64Max) \
+ V(Float64Min) \
+ V(Float64Abs) \
+ V(Float64Sqrt) \
+ V(Float64RoundDown) \
+ V(Float32RoundUp) \
+ V(Float64RoundUp) \
+ V(Float32RoundTruncate) \
+ V(Float64RoundTruncate) \
+ V(Float64RoundTiesAway) \
+ V(Float32RoundTiesEven) \
+ V(Float64RoundTiesEven) \
+ V(Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32) \
+ V(Float64InsertLowWord32) \
+ V(Float64InsertHighWord32) \
+ V(LoadStackPointer) \
+ V(LoadFramePointer) \
+ V(CheckedLoad) \
V(CheckedStore)
#define VALUE_OP_LIST(V) \
@@ -271,60 +356,53 @@
// Returns the mnemonic name of an opcode.
static char const* Mnemonic(Value value);
- static bool IsJsOpcode(Value val) {
- switch (val) {
-// TODO(turbofan): make this a range check.
-#define RETURN_NAME(x) \
- case k##x: \
- return true;
- JS_OP_LIST(RETURN_NAME)
-#undef RETURN_NAME
- default:
- return false;
- }
+  // Returns true if the opcode is for a common operator.
+ static bool IsCommonOpcode(Value value) {
+ return kStart <= value && value <= kDead;
}
- static bool IsControlOpcode(Value val) {
- switch (val) {
-// TODO(turbofan): make this a range check.
-#define RETURN_NAME(x) \
- case k##x: \
- return true;
- CONTROL_OP_LIST(RETURN_NAME)
-#undef RETURN_NAME
- default:
- return false;
- }
+  // Returns true if the opcode is for a control operator.
+ static bool IsControlOpcode(Value value) {
+ return kStart <= value && value <= kEnd;
}
- static bool IsLeafOpcode(Value val) {
- switch (val) {
-// TODO(turbofan): make this a table lookup.
-#define RETURN_NAME(x) \
- case k##x: \
- return true;
- LEAF_OP_LIST(RETURN_NAME)
-#undef RETURN_NAME
- default:
- return false;
- }
+  // Returns true if the opcode is for a JavaScript operator.
+ static bool IsJsOpcode(Value value) {
+ return kJSEqual <= value && value <= kJSStackCheck;
}
- static bool IsCommonOpcode(Value val) {
- switch (val) {
-// TODO(turbofan): make this a table lookup or a range check.
-#define RETURN_NAME(x) \
- case k##x: \
- return true;
- CONTROL_OP_LIST(RETURN_NAME)
- COMMON_OP_LIST(RETURN_NAME)
-#undef RETURN_NAME
- default:
- return false;
- }
+  // Returns true if the opcode is for a constant operator.
+ static bool IsConstantOpcode(Value value) {
+ return kInt32Constant <= value && value <= kHeapConstant;
+ }
+
+ static bool IsPhiOpcode(Value value) {
+ return value == kPhi || value == kEffectPhi;
+ }
+
+ static bool IsMergeOpcode(Value value) {
+ return value == kMerge || value == kLoop;
+ }
+
+ static bool IsIfProjectionOpcode(Value value) {
+ return kIfTrue <= value && value <= kIfDefault;
+ }
+
+  // Returns true if the opcode is a call that can be inlined.
+ static bool IsInlineeOpcode(Value value) {
+ return value == kJSCallConstruct || value == kJSCallFunction;
+ }
+
+  // Returns true if the opcode is for a comparison operator.
+ static bool IsComparisonOpcode(Value value) {
+ return (kJSEqual <= value && value <= kJSGreaterThanOrEqual) ||
+ (kNumberEqual <= value && value <= kStringLessThanOrEqual) ||
+ (kWord32Equal <= value && value <= kFloat64LessThanOrEqual);
}
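+
+  // Note: the range checks above rely on the macro lists defining the
+  // enumeration order; a compile-time guard could pin this down (sketch):
+  //   STATIC_ASSERT(kStart < kEnd);
+  //   STATIC_ASSERT(kInt32Constant <= kHeapConstant);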
};
+std::ostream& operator<<(std::ostream&, IrOpcode::Value);
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/operator-properties.cc b/src/compiler/operator-properties.cc
index abfc5fd..bd704a3 100644
--- a/src/compiler/operator-properties.cc
+++ b/src/compiler/operator-properties.cc
@@ -20,63 +20,95 @@
// static
-bool OperatorProperties::HasFrameStateInput(const Operator* op) {
- if (!FLAG_turbo_deoptimization) {
- return false;
- }
+int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
switch (op->opcode()) {
case IrOpcode::kFrameState:
- return true;
+ return 1;
case IrOpcode::kJSCallRuntime: {
const CallRuntimeParameters& p = CallRuntimeParametersOf(op);
- return Linkage::NeedsFrameState(p.id());
+ return Linkage::FrameStateInputCount(p.id());
}
// Strict equality cannot lazily deoptimize.
case IrOpcode::kJSStrictEqual:
case IrOpcode::kJSStrictNotEqual:
- return false;
+ return 0;
- // Calls
- case IrOpcode::kJSCallFunction:
+ // We record the frame state immediately before and immediately after every
+ // construct/function call.
case IrOpcode::kJSCallConstruct:
+ case IrOpcode::kJSCallFunction:
+ return 2;
// Compare operations
case IrOpcode::kJSEqual:
- case IrOpcode::kJSGreaterThan:
- case IrOpcode::kJSGreaterThanOrEqual:
+ case IrOpcode::kJSNotEqual:
case IrOpcode::kJSHasProperty:
case IrOpcode::kJSInstanceOf:
- case IrOpcode::kJSLessThan:
- case IrOpcode::kJSLessThanOrEqual:
- case IrOpcode::kJSNotEqual:
- // Binary operations
+ // Object operations
+ case IrOpcode::kJSCreate:
+ case IrOpcode::kJSCreateArguments:
+ case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
+
+ // Context operations
+ case IrOpcode::kJSLoadDynamic:
+ case IrOpcode::kJSCreateScriptContext:
+
+ // Conversions
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToObject:
+ case IrOpcode::kJSToString:
+
+ // Misc operations
+ case IrOpcode::kJSConvertReceiver:
+ case IrOpcode::kJSForInNext:
+ case IrOpcode::kJSForInPrepare:
+ case IrOpcode::kJSStackCheck:
+ case IrOpcode::kJSDeleteProperty:
+ return 1;
+
+ // We record the frame state immediately before and immediately after
+ // every property or global variable access.
+ case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSStoreNamed:
+ case IrOpcode::kJSLoadProperty:
+ case IrOpcode::kJSStoreProperty:
+ case IrOpcode::kJSLoadGlobal:
+ case IrOpcode::kJSStoreGlobal:
+ return 2;
+
+ // Binary operators that can deopt in the middle of the operation (e.g.,
+ // as a result of lazy deopt in ToNumber conversion) need a second frame
+ // state so that we can resume before the operation.
+ case IrOpcode::kJSMultiply:
case IrOpcode::kJSAdd:
case IrOpcode::kJSBitwiseAnd:
case IrOpcode::kJSBitwiseOr:
case IrOpcode::kJSBitwiseXor:
case IrOpcode::kJSDivide:
- case IrOpcode::kJSLoadNamed:
- case IrOpcode::kJSLoadProperty:
case IrOpcode::kJSModulus:
- case IrOpcode::kJSMultiply:
case IrOpcode::kJSShiftLeft:
case IrOpcode::kJSShiftRight:
case IrOpcode::kJSShiftRightLogical:
- case IrOpcode::kJSStoreNamed:
- case IrOpcode::kJSStoreProperty:
case IrOpcode::kJSSubtract:
+ return 2;
- // Conversions
- case IrOpcode::kJSToObject:
-
- // Other
- case IrOpcode::kJSDeleteProperty:
- return true;
+ // Compare operators that can deopt in the middle of the operation (e.g.,
+ // as a result of lazy deopt in ToNumber conversion) need a second frame
+ // state so that we can resume before the operation.
+ case IrOpcode::kJSGreaterThan:
+ case IrOpcode::kJSGreaterThanOrEqual:
+ case IrOpcode::kJSLessThan:
+ case IrOpcode::kJSLessThanOrEqual:
+ return 2;
default:
- return false;
+ return 0;
}
}
@@ -95,7 +127,9 @@
return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd ||
opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop ||
opcode == IrOpcode::kMerge || opcode == IrOpcode::kIfTrue ||
- opcode == IrOpcode::kIfFalse;
+ opcode == IrOpcode::kIfFalse || opcode == IrOpcode::kIfSuccess ||
+ opcode == IrOpcode::kIfException || opcode == IrOpcode::kIfValue ||
+ opcode == IrOpcode::kIfDefault;
}
} // namespace compiler
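
Note: the 0/1/2 counts above encode where execution may resume after a lazy
deoptimization. Operations that can deopt in the middle (e.g. during an
implicit ToNumber conversion) carry a frame state both before and after the
operation, while strict equality can never lazily deopt and carries none. A
self-contained sketch of the rule, with a hypothetical three-op enum rather
than the real opcode set:

  #include <cassert>

  enum class JsOp { kStrictEqual, kToNumber, kAdd };

  // One frame state per point where execution may resume after a lazy deopt.
  int FrameStateInputCount(JsOp op) {
    switch (op) {
      case JsOp::kStrictEqual: return 0;  // can never lazily deoptimize
      case JsOp::kToNumber:    return 1;  // resumes before the operation
      case JsOp::kAdd:         return 2;  // may resume before or after it
    }
    return 0;
  }

  int main() {
    assert(FrameStateInputCount(JsOp::kAdd) == 2);
    assert(FrameStateInputCount(JsOp::kStrictEqual) == 0);
  }
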
diff --git a/src/compiler/operator-properties.h b/src/compiler/operator-properties.h
index 37c9755..e7ecd93 100644
--- a/src/compiler/operator-properties.h
+++ b/src/compiler/operator-properties.h
@@ -15,17 +15,14 @@
class Operator;
-class OperatorProperties FINAL {
+class OperatorProperties final {
public:
static bool HasContextInput(const Operator* op);
- static bool HasFrameStateInput(const Operator* op);
-
static int GetContextInputCount(const Operator* op) {
return HasContextInput(op) ? 1 : 0;
}
- static int GetFrameStateInputCount(const Operator* op) {
- return HasFrameStateInput(op) ? 1 : 0;
- }
+ static int GetFrameStateInputCount(const Operator* op);
+
static int GetTotalInputCount(const Operator* op);
static bool IsBasicBlockBegin(const Operator* op);
diff --git a/src/compiler/operator.cc b/src/compiler/operator.cc
index c8687f4..ae10348 100644
--- a/src/compiler/operator.cc
+++ b/src/compiler/operator.cc
@@ -10,13 +10,20 @@
namespace internal {
namespace compiler {
+namespace {
template <typename N>
-static inline N CheckRange(size_t val) {
- CHECK(val <= std::numeric_limits<N>::max());
+V8_INLINE N CheckRange(size_t val) {
+ CHECK_LE(val, std::numeric_limits<N>::max());
return static_cast<N>(val);
}
+} // namespace
+
+
+// static
+STATIC_CONST_MEMBER_DEFINITION const size_t Operator::kMaxControlOutputCount;
+
Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
size_t value_in, size_t effect_in, size_t control_in,
@@ -29,7 +36,7 @@
control_in_(CheckRange<uint16_t>(control_in)),
value_out_(CheckRange<uint16_t>(value_out)),
effect_out_(CheckRange<uint8_t>(effect_out)),
- control_out_(CheckRange<uint8_t>(control_out)) {}
+ control_out_(CheckRange<uint16_t>(control_out)) {}
std::ostream& operator<<(std::ostream& os, const Operator& op) {
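
Note: widening control_out_ from uint8_t to uint16_t matches
Operator::kMaxControlOutputCount = (1u << 16) - 1 introduced below, and
CheckRange guards every narrowing cast so an overflowing count fails loudly
rather than silently truncating. A sketch of the helper, with a plain abort()
standing in for V8's CHECK_LE:

  #include <cstdint>
  #include <cstdlib>
  #include <limits>

  // Verify that a count fits in the narrow storage type before truncating.
  template <typename N>
  N CheckRange(std::size_t val) {
    if (val > std::numeric_limits<N>::max()) std::abort();
    return static_cast<N>(val);
  }

  int main() {
    uint16_t control_out = CheckRange<uint16_t>(65535);  // fits: ok
    // CheckRange<uint8_t>(300) would abort here: 300 > 255.
    return control_out == 65535 ? 0 : 1;
  }
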
diff --git a/src/compiler/operator.h b/src/compiler/operator.h
index fb144ce..fa85d59 100644
--- a/src/compiler/operator.h
+++ b/src/compiler/operator.h
@@ -9,6 +9,7 @@
#include "src/base/flags.h"
#include "src/base/functional.h"
+#include "src/handles.h"
#include "src/zone.h"
namespace v8 {
@@ -29,7 +30,7 @@
// meaningful to the operator itself.
class Operator : public ZoneObject {
public:
- typedef uint8_t Opcode;
+ typedef uint16_t Opcode;
// Properties inform the operator-independent optimizer about legal
// transformations for nodes that have this operator.
@@ -44,6 +45,7 @@
// create new scheduling dependencies.
kNoThrow = 1 << 6, // Can never generate an exception.
kFoldable = kNoRead | kNoWrite,
+ kKontrol = kFoldable | kNoThrow,
kEliminatable = kNoWrite | kNoThrow,
kPure = kNoRead | kNoWrite | kNoThrow | kIdempotent
};
@@ -84,6 +86,9 @@
Properties properties() const { return properties_; }
+ // TODO(bmeurer): Use bit fields below?
+ static const size_t kMaxControlOutputCount = (1u << 16) - 1;
+
// TODO(titzer): convert return values here to size_t.
int ValueInputCount() const { return value_in_; }
int EffectInputCount() const { return effect_in_; }
@@ -93,7 +98,15 @@
int EffectOutputCount() const { return effect_out_; }
int ControlOutputCount() const { return control_out_; }
- static inline size_t ZeroIfPure(Properties properties) {
+ static size_t ZeroIfEliminatable(Properties properties) {
+ return (properties & kEliminatable) == kEliminatable ? 0 : 1;
+ }
+
+ static size_t ZeroIfNoThrow(Properties properties) {
+ return (properties & kNoThrow) == kNoThrow ? 0 : 2;
+ }
+
+ static size_t ZeroIfPure(Properties properties) {
return (properties & kPure) == kPure ? 0 : 1;
}
@@ -113,7 +126,7 @@
uint16_t control_in_;
uint16_t value_out_;
uint8_t effect_out_;
- uint8_t control_out_;
+ uint16_t control_out_;
DISALLOW_COPY_AND_ASSIGN(Operator);
};
@@ -123,10 +136,19 @@
std::ostream& operator<<(std::ostream& os, const Operator& op);
+// Default equality function for below Operator1<*> class.
+template <typename T>
+struct OpEqualTo : public std::equal_to<T> {};
+
+
+// Default hashing function for below Operator1<*> class.
+template <typename T>
+struct OpHash : public base::hash<T> {};
+
+
// A templatized implementation of Operator that has one static parameter of
-// type {T}.
-template <typename T, typename Pred = std::equal_to<T>,
- typename Hash = base::hash<T>>
+// type {T} with the proper default equality and hashing functions.
+template <typename T, typename Pred = OpEqualTo<T>, typename Hash = OpHash<T>>
class Operator1 : public Operator {
public:
Operator1(Opcode opcode, Properties properties, const char* mnemonic,
@@ -141,12 +163,13 @@
T const& parameter() const { return parameter_; }
- bool Equals(const Operator* other) const FINAL {
+ bool Equals(const Operator* other) const final {
if (opcode() != other->opcode()) return false;
- const Operator1<T>* that = static_cast<const Operator1<T>*>(other);
+ const Operator1<T, Pred, Hash>* that =
+ reinterpret_cast<const Operator1<T, Pred, Hash>*>(other);
return this->pred_(this->parameter(), that->parameter());
}
- size_t HashCode() const FINAL {
+ size_t HashCode() const final {
return base::hash_combine(this->opcode(), this->hash_(this->parameter()));
}
virtual void PrintParameter(std::ostream& os) const {
@@ -154,7 +177,7 @@
}
protected:
- void PrintTo(std::ostream& os) const FINAL {
+ void PrintTo(std::ostream& os) const final {
os << mnemonic();
PrintParameter(os);
}
@@ -169,22 +192,38 @@
// Helper to extract parameters from Operator1<*> operator.
template <typename T>
inline T const& OpParameter(const Operator* op) {
- return static_cast<const Operator1<T>*>(op)->parameter();
+ return reinterpret_cast<const Operator1<T, OpEqualTo<T>, OpHash<T>>*>(op)
+ ->parameter();
}
+
// NOTE: We have to be careful to use the right equal/hash functions below, for
-// float/double we always use the ones operating on the bit level.
+// float/double we always use the ones operating on the bit level; for Handle<>
+// we always use the ones operating on the location level.
template <>
-inline float const& OpParameter(const Operator* op) {
- return static_cast<const Operator1<float, base::bit_equal_to<float>,
- base::bit_hash<float>>*>(op)->parameter();
-}
+struct OpEqualTo<float> : public base::bit_equal_to<float> {};
+template <>
+struct OpHash<float> : public base::bit_hash<float> {};
template <>
-inline double const& OpParameter(const Operator* op) {
- return static_cast<const Operator1<double, base::bit_equal_to<double>,
- base::bit_hash<double>>*>(op)->parameter();
-}
+struct OpEqualTo<double> : public base::bit_equal_to<double> {};
+template <>
+struct OpHash<double> : public base::bit_hash<double> {};
+
+template <>
+struct OpEqualTo<Handle<HeapObject>> : public Handle<HeapObject>::equal_to {};
+template <>
+struct OpHash<Handle<HeapObject>> : public Handle<HeapObject>::hash {};
+
+template <>
+struct OpEqualTo<Handle<String>> : public Handle<String>::equal_to {};
+template <>
+struct OpHash<Handle<String>> : public Handle<String>::hash {};
+
+template <>
+struct OpEqualTo<Handle<ScopeInfo>> : public Handle<ScopeInfo>::equal_to {};
+template <>
+struct OpHash<Handle<ScopeInfo>> : public Handle<ScopeInfo>::hash {};
} // namespace compiler
} // namespace internal
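
Note on the bit-level specializations above: value equality on float/double
would merge +0.0 with -0.0 and treat NaN as unequal to itself, either of which
would corrupt caching and value numbering of constant operators. Comparing bit
patterns avoids both. A sketch of the idea (the real code uses
base::bit_equal_to and base::bit_hash):

  #include <cassert>
  #include <cmath>
  #include <cstdint>
  #include <cstring>

  // Compare doubles by bit pattern rather than by value.
  bool BitEqual(double a, double b) {
    uint64_t ua, ub;
    std::memcpy(&ua, &a, sizeof(ua));
    std::memcpy(&ub, &b, sizeof(ub));
    return ua == ub;
  }

  int main() {
    double nan = std::nan("");
    assert(BitEqual(nan, nan));    // NaN equals itself at the bit level
    assert(!BitEqual(0.0, -0.0));  // distinct constants stay distinct
  }
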
diff --git a/src/compiler/osr.cc b/src/compiler/osr.cc
new file mode 100644
index 0000000..55431c2
--- /dev/null
+++ b/src/compiler/osr.cc
@@ -0,0 +1,337 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ast/scopes.h"
+#include "src/compiler.h"
+#include "src/compiler/all-nodes.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/dead-code-elimination.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/graph-trimmer.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/loop-analysis.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/osr.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OsrHelper::OsrHelper(CompilationInfo* info)
+ : parameter_count_(info->scope()->num_parameters()),
+ stack_slot_count_(info->scope()->num_stack_slots() +
+ info->osr_expr_stack_height()) {}
+
+
+#ifdef DEBUG
+#define TRACE_COND (FLAG_trace_turbo_graph && FLAG_trace_osr)
+#else
+#define TRACE_COND false
+#endif
+
+#define TRACE(...) \
+ do { \
+ if (TRACE_COND) PrintF(__VA_ARGS__); \
+ } while (false)
+
+
+// Peel outer loops and rewire the graph so that control reduction can
+// produce a properly formed graph.
+static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
+ Zone* tmp_zone, Node* dead,
+ LoopTree* loop_tree, LoopTree::Loop* osr_loop,
+ Node* osr_normal_entry, Node* osr_loop_entry) {
+ const size_t original_count = graph->NodeCount();
+ AllNodes all(tmp_zone, graph);
+ NodeVector tmp_inputs(tmp_zone);
+ Node* sentinel = graph->NewNode(dead->op());
+
+ // Make a copy of the graph for each outer loop.
+ ZoneVector<NodeVector*> copies(tmp_zone);
+ for (LoopTree::Loop* loop = osr_loop->parent(); loop; loop = loop->parent()) {
+ void* stuff = tmp_zone->New(sizeof(NodeVector));
+ NodeVector* mapping =
+ new (stuff) NodeVector(original_count, sentinel, tmp_zone);
+ copies.push_back(mapping);
+ TRACE("OsrDuplication #%zu, depth %zu, header #%d:%s\n", copies.size(),
+ loop->depth(), loop_tree->HeaderNode(loop)->id(),
+ loop_tree->HeaderNode(loop)->op()->mnemonic());
+
+ // Prepare the mapping for OSR values and the OSR loop entry.
+ mapping->at(osr_normal_entry->id()) = dead;
+ mapping->at(osr_loop_entry->id()) = dead;
+
+ // The outer loops are dead in this copy.
+ for (LoopTree::Loop* outer = loop->parent(); outer;
+ outer = outer->parent()) {
+ for (Node* node : loop_tree->HeaderNodes(outer)) {
+ mapping->at(node->id()) = dead;
+ TRACE(" ---- #%d:%s -> dead (header)\n", node->id(),
+ node->op()->mnemonic());
+ }
+ }
+
+ // Copy all nodes.
+ for (size_t i = 0; i < all.live.size(); i++) {
+ Node* orig = all.live[i];
+ Node* copy = mapping->at(orig->id());
+ if (copy != sentinel) {
+ // Mapping already exists.
+ continue;
+ }
+ if (orig->InputCount() == 0 || orig->opcode() == IrOpcode::kParameter ||
+ orig->opcode() == IrOpcode::kOsrValue) {
+ // No need to copy leaf nodes or parameters.
+ mapping->at(orig->id()) = orig;
+ continue;
+ }
+
+ // Copy the node.
+ tmp_inputs.clear();
+ for (Node* input : orig->inputs()) {
+ tmp_inputs.push_back(mapping->at(input->id()));
+ }
+ copy = graph->NewNode(orig->op(), orig->InputCount(), &tmp_inputs[0]);
+ if (NodeProperties::IsTyped(orig)) {
+ NodeProperties::SetType(copy, NodeProperties::GetType(orig));
+ }
+ mapping->at(orig->id()) = copy;
+ TRACE(" copy #%d:%s -> #%d\n", orig->id(), orig->op()->mnemonic(),
+ copy->id());
+ }
+
+ // Fix missing inputs.
+ for (Node* orig : all.live) {
+ Node* copy = mapping->at(orig->id());
+ for (int j = 0; j < copy->InputCount(); j++) {
+ if (copy->InputAt(j) == sentinel) {
+ copy->ReplaceInput(j, mapping->at(orig->InputAt(j)->id()));
+ }
+ }
+ }
+
+ // Construct the entry into this loop from previous copies.
+
+ // Gather the live loop header nodes, {loop_header} first.
+ Node* loop_header = loop_tree->HeaderNode(loop);
+ NodeVector header_nodes(tmp_zone);
+ header_nodes.reserve(loop->HeaderSize());
+ header_nodes.push_back(loop_header); // put the loop header first.
+ for (Node* node : loop_tree->HeaderNodes(loop)) {
+ if (node != loop_header && all.IsLive(node)) {
+ header_nodes.push_back(node);
+ }
+ }
+
+ // Gather backedges from the previous copies of the inner loops of {loop}.
+ NodeVectorVector backedges(tmp_zone);
+ TRACE("Gathering backedges...\n");
+ for (int i = 1; i < loop_header->InputCount(); i++) {
+ if (TRACE_COND) {
+ Node* control = loop_header->InputAt(i);
+ size_t incoming_depth = 0;
+ for (int j = 0; j < control->op()->ControlInputCount(); j++) {
+ Node* k = NodeProperties::GetControlInput(control, j);
+ incoming_depth =
+ std::max(incoming_depth, loop_tree->ContainingLoop(k)->depth());
+ }
+
+ TRACE(" edge @%d #%d:%s, incoming depth %zu\n", i, control->id(),
+ control->op()->mnemonic(), incoming_depth);
+ }
+
+ for (int pos = static_cast<int>(copies.size()) - 1; pos >= 0; pos--) {
+ backedges.push_back(NodeVector(tmp_zone));
+ backedges.back().reserve(header_nodes.size());
+
+ NodeVector* previous_map = pos > 0 ? copies[pos - 1] : nullptr;
+
+ for (Node* node : header_nodes) {
+ Node* input = node->InputAt(i);
+ if (previous_map) input = previous_map->at(input->id());
+ backedges.back().push_back(input);
+ TRACE(" node #%d:%s(@%d) = #%d:%s\n", node->id(),
+ node->op()->mnemonic(), i, input->id(),
+ input->op()->mnemonic());
+ }
+ }
+ }
+
+ int backedge_count = static_cast<int>(backedges.size());
+ if (backedge_count == 1) {
+ // Simple case of single backedge, therefore a single entry.
+ int index = 0;
+ for (Node* node : header_nodes) {
+ Node* copy = mapping->at(node->id());
+ Node* input = backedges[0][index];
+ copy->ReplaceInput(0, input);
+ TRACE(" header #%d:%s(0) => #%d:%s\n", copy->id(),
+ copy->op()->mnemonic(), input->id(), input->op()->mnemonic());
+ index++;
+ }
+ } else {
+ // Complex case of multiple backedges from previous copies requires
+ // merging the backedges to create the entry into the loop header.
+ Node* merge = nullptr;
+ int index = 0;
+ for (Node* node : header_nodes) {
+ // Gather edge inputs into {tmp_inputs}.
+ tmp_inputs.clear();
+ for (int edge = 0; edge < backedge_count; edge++) {
+ tmp_inputs.push_back(backedges[edge][index]);
+ }
+ Node* copy = mapping->at(node->id());
+ Node* input;
+ if (node == loop_header) {
+ // Create the merge for the entry into the loop header.
+ input = merge = graph->NewNode(common->Merge(backedge_count),
+ backedge_count, &tmp_inputs[0]);
+ copy->ReplaceInput(0, merge);
+ } else {
+ // Create a phi that merges values at entry into the loop header.
+ DCHECK_NOT_NULL(merge);
+ DCHECK(IrOpcode::IsPhiOpcode(node->opcode()));
+ tmp_inputs.push_back(merge);
+ Node* phi = input = graph->NewNode(
+ common->ResizeMergeOrPhi(node->op(), backedge_count),
+ backedge_count + 1, &tmp_inputs[0]);
+ copy->ReplaceInput(0, phi);
+ }
+
+ // Print the merge.
+ if (TRACE_COND) {
+ TRACE(" header #%d:%s(0) => #%d:%s(", copy->id(),
+ copy->op()->mnemonic(), input->id(), input->op()->mnemonic());
+ for (size_t i = 0; i < tmp_inputs.size(); i++) {
+ if (i > 0) TRACE(", ");
+ Node* input = tmp_inputs[i];
+ TRACE("#%d:%s", input->id(), input->op()->mnemonic());
+ }
+ TRACE(")\n");
+ }
+
+ index++;
+ }
+ }
+ }
+
+ // Kill the outer loops in the original graph.
+ TRACE("Killing outer loop headers...\n");
+ for (LoopTree::Loop* outer = osr_loop->parent(); outer;
+ outer = outer->parent()) {
+ Node* loop_header = loop_tree->HeaderNode(outer);
+ loop_header->ReplaceUses(dead);
+ TRACE(" ---- #%d:%s\n", loop_header->id(), loop_header->op()->mnemonic());
+ }
+
+ // Merge the ends of the graph copies.
+ Node* const end = graph->end();
+ int const input_count = end->InputCount();
+ for (int i = 0; i < input_count; ++i) {
+ NodeId const id = end->InputAt(i)->id();
+ for (NodeVector* const copy : copies) {
+ end->AppendInput(graph->zone(), copy->at(id));
+ NodeProperties::ChangeOp(end, common->End(end->InputCount()));
+ }
+ }
+
+ if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
+ os << "-- Graph after OSR duplication -- " << std::endl;
+ os << AsRPO(*graph);
+ }
+}
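
Note: the duplication above may reach a node before its inputs have been
copied, so fresh copies initially point at a sentinel node and a second pass
patches them through the id-to-copy mapping. The same two-pass shape in
miniature, with an illustrative Node struct rather than V8's:

  #include <cassert>
  #include <cstddef>
  #include <vector>

  struct Node {
    int id;                     // ids index the mapping below
    std::vector<Node*> inputs;
  };

  // Pass 1 allocates copies (inputs not yet copied stay sentinel); pass 2
  // patches the sentinels through the id -> copy mapping. Copies are
  // intentionally leaked in this sketch.
  std::vector<Node*> CopyGraph(const std::vector<Node*>& nodes) {
    Node sentinel{-1, {}};
    std::vector<Node*> mapping(nodes.size(), &sentinel);
    for (const Node* orig : nodes) {
      Node* copy = new Node{orig->id, {}};
      for (const Node* in : orig->inputs) copy->inputs.push_back(mapping[in->id]);
      mapping[orig->id] = copy;
    }
    for (const Node* orig : nodes) {
      Node* copy = mapping[orig->id];
      for (std::size_t j = 0; j < copy->inputs.size(); ++j) {
        if (copy->inputs[j] == &sentinel)
          copy->inputs[j] = mapping[orig->inputs[j]->id];
      }
    }
    return mapping;
  }

  int main() {
    Node a{0, {}}, b{1, {}};
    a.inputs = {&b};  // a is visited before its input b is copied
    b.inputs = {&a};
    std::vector<Node*> copies = CopyGraph({&a, &b});
    assert(copies[0]->inputs[0] == copies[1]);  // patched in pass 2
    assert(copies[1]->inputs[0] == copies[0]);  // resolved in pass 1
  }
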
+
+
+void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
+ Zone* tmp_zone) {
+ Graph* graph = jsgraph->graph();
+ Node* osr_normal_entry = nullptr;
+ Node* osr_loop_entry = nullptr;
+ Node* osr_loop = nullptr;
+
+ for (Node* node : graph->start()->uses()) {
+ if (node->opcode() == IrOpcode::kOsrLoopEntry) {
+ osr_loop_entry = node; // found the OSR loop entry
+ } else if (node->opcode() == IrOpcode::kOsrNormalEntry) {
+ osr_normal_entry = node;
+ }
+ }
+
+ if (osr_loop_entry == nullptr) {
+ // No OSR entry found, do nothing.
+ CHECK(osr_normal_entry);
+ return;
+ }
+
+ for (Node* use : osr_loop_entry->uses()) {
+ if (use->opcode() == IrOpcode::kLoop) {
+ CHECK(!osr_loop); // should be only one OSR loop.
+ osr_loop = use; // found the OSR loop.
+ }
+ }
+
+ CHECK(osr_loop); // Should have found the OSR loop.
+
+ // Analyze the graph to determine how deeply nested the OSR loop is.
+ LoopTree* loop_tree = LoopFinder::BuildLoopTree(graph, tmp_zone);
+
+ Node* dead = jsgraph->Dead();
+ LoopTree::Loop* loop = loop_tree->ContainingLoop(osr_loop);
+ if (loop->depth() > 0) {
+ PeelOuterLoopsForOsr(graph, common, tmp_zone, dead, loop_tree, loop,
+ osr_normal_entry, osr_loop_entry);
+ }
+
+ // Replace the normal entry with {Dead} and the loop entry with {Start}
+ // and run the control reducer to clean up the graph.
+ osr_normal_entry->ReplaceUses(dead);
+ osr_normal_entry->Kill();
+ osr_loop_entry->ReplaceUses(graph->start());
+ osr_loop_entry->Kill();
+
+ // Remove the first input to the {osr_loop}.
+ int const live_input_count = osr_loop->InputCount() - 1;
+ CHECK_NE(0, live_input_count);
+ for (Node* const use : osr_loop->uses()) {
+ if (NodeProperties::IsPhi(use)) {
+ use->RemoveInput(0);
+ NodeProperties::ChangeOp(
+ use, common->ResizeMergeOrPhi(use->op(), live_input_count));
+ }
+ }
+ osr_loop->RemoveInput(0);
+ NodeProperties::ChangeOp(
+ osr_loop, common->ResizeMergeOrPhi(osr_loop->op(), live_input_count));
+
+ // Run control reduction and graph trimming.
+ // TODO(bmeurer): The OSR deconstruction could be a regular reducer and play
+ // nice together with the rest, instead of having this custom stuff here.
+ GraphReducer graph_reducer(tmp_zone, graph);
+ DeadCodeElimination dce(&graph_reducer, graph, common);
+ CommonOperatorReducer cor(&graph_reducer, graph, common, jsgraph->machine());
+ graph_reducer.AddReducer(&dce);
+ graph_reducer.AddReducer(&cor);
+ graph_reducer.ReduceGraph();
+ GraphTrimmer trimmer(tmp_zone, graph);
+ NodeVector roots(tmp_zone);
+ jsgraph->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+}
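
Note: the final step above drops the normal-entry input (index 0) from the OSR
loop, and every phi on that loop must shed the matching value input so operator
input counts stay consistent; that is what ResizeMergeOrPhi re-establishes. The
invariant in miniature, with illustrative stand-in types:

  #include <cassert>
  #include <cstddef>
  #include <vector>

  struct FakeLoop { std::vector<int> control_inputs; };
  struct FakePhi  { std::vector<int> value_inputs; };  // plus one control input

  // Remove entry {index} from the loop and from every dependent phi.
  void RemoveEntry(FakeLoop* loop, std::vector<FakePhi*>& phis,
                   std::size_t index) {
    loop->control_inputs.erase(loop->control_inputs.begin() + index);
    for (FakePhi* phi : phis) {
      phi->value_inputs.erase(phi->value_inputs.begin() + index);
      // A phi has one value input per control input of its merge/loop.
      assert(phi->value_inputs.size() == loop->control_inputs.size());
    }
  }

  int main() {
    FakeLoop loop{{0, 1}};        // normal entry + backedge
    FakePhi phi{{10, 11}};
    std::vector<FakePhi*> phis{&phi};
    RemoveEntry(&loop, phis, 0);  // drop the normal entry
    assert(phi.value_inputs[0] == 11);
  }
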
+
+
+void OsrHelper::SetupFrame(Frame* frame) {
+ // The optimized frame will subsume the unoptimized frame. Do so by reserving
+ // the first spill slots.
+ frame->ReserveSpillSlots(UnoptimizedFrameSlots());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/osr.h b/src/compiler/osr.h
new file mode 100644
index 0000000..89773f0
--- /dev/null
+++ b/src/compiler/osr.h
@@ -0,0 +1,126 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OSR_H_
+#define V8_COMPILER_OSR_H_
+
+#include "src/zone.h"
+
+// TurboFan structures OSR graphs in a way that separates almost all phases of
+// compilation from OSR implementation details. This is accomplished with
+// special control nodes that are added at graph building time. In particular,
+// the graph is built in such a way that typing still computes the best types
+// and optimizations and lowering work unchanged. All that remains is to
+// deconstruct the OSR artifacts before scheduling and code generation.
+
+// Graphs built for OSR from the AstGraphBuilder are structured as follows:
+// Start
+// +-------------------^^-----+
+// | |
+// OsrNormalEntry OsrLoopEntry <-------------+
+// | | |
+// control flow before loop | A OsrValue
+// | | | |
+// | +------------------------+ | +-------+
+// | | +-------------+ | | +--------+
+// | | | | | | | |
+// ( Loop )<-----------|------------------ ( phi ) |
+// | | |
+// loop body | backedge(s) |
+// | | | |
+// | +--------------+ B <-----+
+// |
+// end
+
+// The control structure expresses the relationship that the loop has a separate
+// entrypoint which corresponds to entering the loop directly from the middle
+// of unoptimized code.
+// Similarly, the values that come in from unoptimized code are represented with
+// {OsrValue} nodes that merge into any phis associated with the OSR loop.
+// In the above diagram, nodes {A} and {B} represent values in the "normal"
+// graph that correspond to the values of those phis before the loop and on any
+// backedges, respectively.
+
+// To deconstruct OSR, we simply replace the uses of the {OsrNormalEntry}
+// control node with {Dead} and {OsrLoopEntry} with start and run the
+// {ControlReducer}. Control reduction propagates the dead control forward,
+// essentially "killing" all the code before the OSR loop. The entrypoint to the
+// loop corresponding to the "normal" entry path will also be removed, as well
+// as the inputs to the loop phis, resulting in the reduced graph:
+
+// Start
+// Dead |^-------------------------+
+// | | |
+// | | |
+// | | |
+// disconnected, dead | A=dead OsrValue
+// | |
+// +------------------+ +------+
+// | +-------------+ | +--------+
+// | | | | | |
+// ( Loop )<-----------|------------------ ( phi ) |
+// | | |
+// loop body | backedge(s) |
+// | | | |
+// | +--------------+ B <-----+
+// |
+// end
+
+// Other than the presence of the OsrValue nodes, this is a normal, schedulable
+// graph. OsrValue nodes are handled specially in the instruction selector to
+// simply load from the unoptimized frame.
+
+// For nested OSR loops, loop peeling must first be applied as many times as
+// necessary in order to bring the OSR loop up to the top level (i.e. to be
+// an outer loop).
+
+namespace v8 {
+namespace internal {
+
+class CompilationInfo;
+
+namespace compiler {
+
+class JSGraph;
+class CommonOperatorBuilder;
+class Frame;
+class Linkage;
+
+// Encapsulates logic relating to OSR compilations as well as handling some
+// details of the frame layout.
+class OsrHelper {
+ public:
+ explicit OsrHelper(CompilationInfo* info);
+ // Only for testing.
+ OsrHelper(size_t parameter_count, size_t stack_slot_count)
+ : parameter_count_(parameter_count),
+ stack_slot_count_(stack_slot_count) {}
+
+ // Deconstructs the artificial {OsrNormalEntry} and rewrites the graph so
+ // that only the path corresponding to {OsrLoopEntry} remains.
+ void Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
+ Zone* tmp_zone);
+
+ // Prepares the frame w.r.t. OSR.
+ void SetupFrame(Frame* frame);
+
+ // Returns the number of unoptimized frame slots for this OSR.
+ size_t UnoptimizedFrameSlots() { return stack_slot_count_; }
+
+ // Returns the environment index of the first stack slot.
+ static int FirstStackSlotIndex(int parameter_count) {
+ // n.b. unlike Crankshaft, TurboFan environments do not contain the context.
+ return 1 + parameter_count; // receiver + params
+ }
+
+ private:
+ size_t parameter_count_;
+ size_t stack_slot_count_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_OSR_H_
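
Note: FirstStackSlotIndex above reflects the environment layout assumed
throughout: index 0 is the receiver, indices 1 through parameter_count are the
parameters, and stack slots follow immediately, since TurboFan environments
carry no context slot. A tiny check of that arithmetic:

  #include <cassert>

  // 0: receiver | 1 .. parameter_count: parameters | then stack slots.
  int FirstStackSlotIndex(int parameter_count) {
    return 1 + parameter_count;  // receiver + params
  }

  int main() {
    assert(FirstStackSlotIndex(2) == 3);  // receiver, p0, p1, then slot 3
  }
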
diff --git a/src/compiler/pipeline-statistics.cc b/src/compiler/pipeline-statistics.cc
index e58c396..b98f837 100644
--- a/src/compiler/pipeline-statistics.cc
+++ b/src/compiler/pipeline-statistics.cc
@@ -36,23 +36,23 @@
diff->max_allocated_bytes_ + allocated_bytes_at_start_;
diff->total_allocated_bytes_ =
outer_zone_diff + scope_->GetTotalAllocatedBytes();
- scope_.Reset(NULL);
+ scope_.Reset(nullptr);
timer_.Stop();
}
PipelineStatistics::PipelineStatistics(CompilationInfo* info,
ZonePool* zone_pool)
- : isolate_(info->zone()->isolate()),
+ : isolate_(info->isolate()),
outer_zone_(info->zone()),
zone_pool_(zone_pool),
compilation_stats_(isolate_->GetTurboStatistics()),
source_size_(0),
- phase_kind_name_(NULL),
- phase_name_(NULL) {
- if (!info->shared_info().is_null()) {
+ phase_kind_name_(nullptr),
+ phase_name_(nullptr) {
+ if (info->has_shared_info()) {
source_size_ = static_cast<size_t>(info->shared_info()->SourceSize());
- SmartArrayPointer<char> name =
+ base::SmartArrayPointer<char> name =
info->shared_info()->DebugName()->ToCString();
function_name_ = name.get();
}
diff --git a/src/compiler/pipeline-statistics.h b/src/compiler/pipeline-statistics.h
index 01cc9de..2b6563d 100644
--- a/src/compiler/pipeline-statistics.h
+++ b/src/compiler/pipeline-statistics.h
@@ -36,7 +36,7 @@
void End(PipelineStatistics* pipeline_stats,
CompilationStatistics::BasicStats* diff);
- SmartPointer<ZonePool::StatsScope> scope_;
+ base::SmartPointer<ZonePool::StatsScope> scope_;
base::ElapsedTimer timer_;
size_t outer_zone_initial_size_;
size_t allocated_bytes_at_start_;
@@ -76,10 +76,10 @@
public:
PhaseScope(PipelineStatistics* pipeline_stats, const char* name)
: pipeline_stats_(pipeline_stats) {
- if (pipeline_stats_ != NULL) pipeline_stats_->BeginPhase(name);
+ if (pipeline_stats_ != nullptr) pipeline_stats_->BeginPhase(name);
}
~PhaseScope() {
- if (pipeline_stats_ != NULL) pipeline_stats_->EndPhase();
+ if (pipeline_stats_ != nullptr) pipeline_stats_->EndPhase();
}
private:
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index c7432c6..4d6aacd 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -7,27 +7,46 @@
#include <fstream> // NOLINT(readability/streams)
#include <sstream>
+#include "src/base/adapters.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/basic-block-instrumentor.h"
+#include "src/compiler/branch-elimination.h"
+#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/change-lowering.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
-#include "src/compiler/control-reducer.h"
+#include "src/compiler/control-flow-optimizer.h"
+#include "src/compiler/dead-code-elimination.h"
+#include "src/compiler/escape-analysis.h"
+#include "src/compiler/escape-analysis-reducer.h"
+#include "src/compiler/frame-elider.h"
#include "src/compiler/graph-replay.h"
+#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/greedy-allocator.h"
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-call-reducer.h"
+#include "src/compiler/js-context-relaxation.h"
#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-frame-specialization.h"
#include "src/compiler/js-generic-lowering.h"
-#include "src/compiler/js-inlining.h"
+#include "src/compiler/js-global-object-specialization.h"
+#include "src/compiler/js-inlining-heuristic.h"
+#include "src/compiler/js-intrinsic-lowering.h"
+#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/jump-threading.h"
+#include "src/compiler/live-range-separator.h"
#include "src/compiler/load-elimination.h"
+#include "src/compiler/loop-analysis.h"
+#include "src/compiler/loop-peeling.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/move-optimizer.h"
+#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/register-allocator-verifier.h"
@@ -35,12 +54,17 @@
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/tail-call-optimization.h"
+#include "src/compiler/type-hint-analyzer.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
#include "src/compiler/zone-pool.h"
#include "src/ostreams.h"
+#include "src/register-configuration.h"
+#include "src/type-info.h"
#include "src/utils.h"
namespace v8 {
@@ -49,8 +73,79 @@
class PipelineData {
public:
- explicit PipelineData(ZonePool* zone_pool, CompilationInfo* info)
- : isolate_(info->zone()->isolate()),
+ // For main entry point.
+ PipelineData(ZonePool* zone_pool, CompilationInfo* info,
+ PipelineStatistics* pipeline_statistics)
+ : isolate_(info->isolate()),
+ info_(info),
+ outer_zone_(info_->zone()),
+ zone_pool_(zone_pool),
+ pipeline_statistics_(pipeline_statistics),
+ compilation_failed_(false),
+ code_(Handle<Code>::null()),
+ graph_zone_scope_(zone_pool_),
+ graph_zone_(graph_zone_scope_.zone()),
+ graph_(nullptr),
+ loop_assignment_(nullptr),
+ simplified_(nullptr),
+ machine_(nullptr),
+ common_(nullptr),
+ javascript_(nullptr),
+ jsgraph_(nullptr),
+ schedule_(nullptr),
+ instruction_zone_scope_(zone_pool_),
+ instruction_zone_(instruction_zone_scope_.zone()),
+ sequence_(nullptr),
+ frame_(nullptr),
+ register_allocation_zone_scope_(zone_pool_),
+ register_allocation_zone_(register_allocation_zone_scope_.zone()),
+ register_allocation_data_(nullptr) {
+ PhaseScope scope(pipeline_statistics, "init pipeline data");
+ graph_ = new (graph_zone_) Graph(graph_zone_);
+ source_positions_.Reset(new SourcePositionTable(graph_));
+ simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
+ machine_ = new (graph_zone_) MachineOperatorBuilder(
+ graph_zone_, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags());
+ common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
+ javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
+ jsgraph_ = new (graph_zone_)
+ JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
+ }
+
+ // For machine graph testing entry point.
+ PipelineData(ZonePool* zone_pool, CompilationInfo* info, Graph* graph,
+ Schedule* schedule)
+ : isolate_(info->isolate()),
+ info_(info),
+ outer_zone_(nullptr),
+ zone_pool_(zone_pool),
+ pipeline_statistics_(nullptr),
+ compilation_failed_(false),
+ code_(Handle<Code>::null()),
+ graph_zone_scope_(zone_pool_),
+ graph_zone_(nullptr),
+ graph_(graph),
+ source_positions_(new SourcePositionTable(graph_)),
+ loop_assignment_(nullptr),
+ simplified_(nullptr),
+ machine_(nullptr),
+ common_(nullptr),
+ javascript_(nullptr),
+ jsgraph_(nullptr),
+ schedule_(schedule),
+ instruction_zone_scope_(zone_pool_),
+ instruction_zone_(instruction_zone_scope_.zone()),
+ sequence_(nullptr),
+ frame_(nullptr),
+ register_allocation_zone_scope_(zone_pool_),
+ register_allocation_zone_(register_allocation_zone_scope_.zone()),
+ register_allocation_data_(nullptr) {}
+
+ // For register allocation testing entry point.
+ PipelineData(ZonePool* zone_pool, CompilationInfo* info,
+ InstructionSequence* sequence)
+ : isolate_(info->isolate()),
info_(info),
outer_zone_(nullptr),
zone_pool_(zone_pool),
@@ -61,57 +156,26 @@
graph_zone_(nullptr),
graph_(nullptr),
loop_assignment_(nullptr),
+ simplified_(nullptr),
machine_(nullptr),
common_(nullptr),
javascript_(nullptr),
jsgraph_(nullptr),
- typer_(nullptr),
- context_node_(nullptr),
schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
- instruction_zone_(nullptr),
- sequence_(nullptr),
+ instruction_zone_(sequence->zone()),
+ sequence_(sequence),
frame_(nullptr),
- register_allocator_(nullptr) {}
+ register_allocation_zone_scope_(zone_pool_),
+ register_allocation_zone_(register_allocation_zone_scope_.zone()),
+ register_allocation_data_(nullptr) {}
~PipelineData() {
+ DeleteRegisterAllocationZone();
DeleteInstructionZone();
DeleteGraphZone();
}
- // For main entry point.
- void Initialize(PipelineStatistics* pipeline_statistics) {
- PhaseScope scope(pipeline_statistics, "init pipeline data");
- outer_zone_ = info()->zone();
- pipeline_statistics_ = pipeline_statistics;
- graph_zone_ = graph_zone_scope_.zone();
- graph_ = new (graph_zone()) Graph(graph_zone());
- source_positions_.Reset(new SourcePositionTable(graph()));
- machine_ = new (graph_zone()) MachineOperatorBuilder(
- graph_zone(), kMachPtr,
- InstructionSelector::SupportedMachineOperatorFlags());
- common_ = new (graph_zone()) CommonOperatorBuilder(graph_zone());
- javascript_ = new (graph_zone()) JSOperatorBuilder(graph_zone());
- jsgraph_ =
- new (graph_zone()) JSGraph(graph(), common(), javascript(), machine());
- typer_.Reset(new Typer(graph(), info()->context()));
- instruction_zone_ = instruction_zone_scope_.zone();
- }
-
- // For machine graph testing entry point.
- void InitializeTorTesting(Graph* graph, Schedule* schedule) {
- graph_ = graph;
- source_positions_.Reset(new SourcePositionTable(graph));
- schedule_ = schedule;
- instruction_zone_ = instruction_zone_scope_.zone();
- }
-
- // For register allocation testing entry point.
- void InitializeTorTesting(InstructionSequence* sequence) {
- instruction_zone_ = sequence->zone();
- sequence_ = sequence;
- }
-
Isolate* isolate() const { return isolate_; }
CompilationInfo* info() const { return info_; }
ZonePool* zone_pool() const { return zone_pool_; }
@@ -136,46 +200,55 @@
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
JSGraph* jsgraph() const { return jsgraph_; }
- Typer* typer() const { return typer_.get(); }
+ MaybeHandle<Context> native_context() const {
+ if (info()->is_native_context_specializing()) {
+ return handle(info()->native_context(), isolate());
+ }
+ return MaybeHandle<Context>();
+ }
LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
void set_loop_assignment(LoopAssignmentAnalysis* loop_assignment) {
- DCHECK_EQ(nullptr, loop_assignment_);
+ DCHECK(!loop_assignment_);
loop_assignment_ = loop_assignment;
}
- Node* context_node() const { return context_node_; }
- void set_context_node(Node* context_node) {
- DCHECK_EQ(nullptr, context_node_);
- context_node_ = context_node;
+ TypeHintAnalysis* type_hint_analysis() const { return type_hint_analysis_; }
+ void set_type_hint_analysis(TypeHintAnalysis* type_hint_analysis) {
+ DCHECK_NULL(type_hint_analysis_);
+ type_hint_analysis_ = type_hint_analysis;
}
Schedule* schedule() const { return schedule_; }
void set_schedule(Schedule* schedule) {
- DCHECK_EQ(nullptr, schedule_);
+ DCHECK(!schedule_);
schedule_ = schedule;
}
Zone* instruction_zone() const { return instruction_zone_; }
InstructionSequence* sequence() const { return sequence_; }
Frame* frame() const { return frame_; }
- RegisterAllocator* register_allocator() const { return register_allocator_; }
+
+ Zone* register_allocation_zone() const { return register_allocation_zone_; }
+ RegisterAllocationData* register_allocation_data() const {
+ return register_allocation_data_;
+ }
void DeleteGraphZone() {
// Destroy objects with destructors first.
source_positions_.Reset(nullptr);
- typer_.Reset(nullptr);
if (graph_zone_ == nullptr) return;
// Destroy zone and clear pointers.
graph_zone_scope_.Destroy();
graph_zone_ = nullptr;
graph_ = nullptr;
loop_assignment_ = nullptr;
+ type_hint_analysis_ = nullptr;
+ simplified_ = nullptr;
machine_ = nullptr;
common_ = nullptr;
javascript_ = nullptr;
jsgraph_ = nullptr;
- context_node_ = nullptr;
schedule_ = nullptr;
}
@@ -185,26 +258,40 @@
instruction_zone_ = nullptr;
sequence_ = nullptr;
frame_ = nullptr;
- register_allocator_ = nullptr;
+ }
+
+ void DeleteRegisterAllocationZone() {
+ if (register_allocation_zone_ == nullptr) return;
+ register_allocation_zone_scope_.Destroy();
+ register_allocation_zone_ = nullptr;
+ register_allocation_data_ = nullptr;
}
void InitializeInstructionSequence() {
- DCHECK_EQ(nullptr, sequence_);
+ DCHECK(sequence_ == nullptr);
InstructionBlocks* instruction_blocks =
InstructionSequence::InstructionBlocksFor(instruction_zone(),
schedule());
- sequence_ = new (instruction_zone())
- InstructionSequence(instruction_zone(), instruction_blocks);
+ sequence_ = new (instruction_zone()) InstructionSequence(
+ info()->isolate(), instruction_zone(), instruction_blocks);
}
- void InitializeRegisterAllocator(Zone* local_zone,
- const RegisterConfiguration* config,
- const char* debug_name) {
- DCHECK_EQ(nullptr, register_allocator_);
- DCHECK_EQ(nullptr, frame_);
- frame_ = new (instruction_zone()) Frame();
- register_allocator_ = new (instruction_zone())
- RegisterAllocator(config, local_zone, frame(), sequence(), debug_name);
+ void InitializeRegisterAllocationData(const RegisterConfiguration* config,
+ CallDescriptor* descriptor,
+ const char* debug_name) {
+ DCHECK(frame_ == nullptr);
+ DCHECK(register_allocation_data_ == nullptr);
+ int fixed_frame_size = 0;
+ if (descriptor != nullptr) {
+ fixed_frame_size = (descriptor->IsCFunctionCall())
+ ? StandardFrameConstants::kFixedSlotCountAboveFp +
+ StandardFrameConstants::kCPSlotCount
+ : StandardFrameConstants::kFixedSlotCount;
+ }
+ frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
+ register_allocation_data_ = new (register_allocation_zone())
+ RegisterAllocationData(config, register_allocation_zone(), frame(),
+ sequence(), debug_name);
}
private:
@@ -217,43 +304,42 @@
Handle<Code> code_;
// All objects in the following group of fields are allocated in graph_zone_.
- // They are all set to NULL when the graph_zone_ is destroyed.
+ // They are all set to nullptr when the graph_zone_ is destroyed.
ZonePool::Scope graph_zone_scope_;
Zone* graph_zone_;
Graph* graph_;
// TODO(dcarney): make this into a ZoneObject.
- SmartPointer<SourcePositionTable> source_positions_;
+ base::SmartPointer<SourcePositionTable> source_positions_;
LoopAssignmentAnalysis* loop_assignment_;
+ TypeHintAnalysis* type_hint_analysis_ = nullptr;
+ SimplifiedOperatorBuilder* simplified_;
MachineOperatorBuilder* machine_;
CommonOperatorBuilder* common_;
JSOperatorBuilder* javascript_;
JSGraph* jsgraph_;
- // TODO(dcarney): make this into a ZoneObject.
- SmartPointer<Typer> typer_;
- Node* context_node_;
Schedule* schedule_;
// All objects in the following group of fields are allocated in
- // instruction_zone_. They are all set to NULL when the instruction_zone_ is
+ // instruction_zone_. They are all set to nullptr when the instruction_zone_ is
// destroyed.
ZonePool::Scope instruction_zone_scope_;
Zone* instruction_zone_;
InstructionSequence* sequence_;
Frame* frame_;
- RegisterAllocator* register_allocator_;
+
+ // All objects in the following group of fields are allocated in
+ // register_allocation_zone_. They are all set to nullptr when the zone is
+ // destroyed.
+ ZonePool::Scope register_allocation_zone_scope_;
+ Zone* register_allocation_zone_;
+ RegisterAllocationData* register_allocation_data_;
DISALLOW_COPY_AND_ASSIGN(PipelineData);
};
-static inline bool VerifyGraphs() {
-#ifdef DEBUG
- return true;
-#else
- return FLAG_turbo_verify;
-#endif
-}
-
+namespace {
struct TurboCfgFile : public std::ofstream {
explicit TurboCfgFile(Isolate* isolate)
@@ -262,48 +348,47 @@
};
-static void TraceSchedule(Schedule* schedule) {
+void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
+ if (FLAG_trace_turbo) {
+ FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
+ if (json_file != nullptr) {
+ OFStream json_of(json_file);
+ json_of << "{\"name\":\"Schedule\",\"type\":\"schedule\",\"data\":\"";
+ std::stringstream schedule_stream;
+ schedule_stream << *schedule;
+ std::string schedule_string(schedule_stream.str());
+ for (const auto& c : schedule_string) {
+ json_of << AsEscapedUC16ForJSON(c);
+ }
+ json_of << "\"},\n";
+ fclose(json_file);
+ }
+ }
if (!FLAG_trace_turbo_graph && !FLAG_trace_turbo_scheduler) return;
OFStream os(stdout);
os << "-- Schedule --------------------------------------\n" << *schedule;
}
-static SmartArrayPointer<char> GetDebugName(CompilationInfo* info) {
- SmartArrayPointer<char> name;
- if (info->IsStub()) {
- if (info->code_stub() != NULL) {
- CodeStub::Major major_key = info->code_stub()->MajorKey();
- const char* major_name = CodeStub::MajorName(major_key, false);
- size_t len = strlen(major_name);
- name.Reset(new char[len]);
- memcpy(name.get(), major_name, len);
- }
- } else {
- AllowHandleDereference allow_deref;
- name = info->function()->debug_name()->ToCString();
- }
- return name;
-}
-
-
-class AstGraphBuilderWithPositions : public AstGraphBuilder {
+class AstGraphBuilderWithPositions final : public AstGraphBuilder {
public:
AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph,
LoopAssignmentAnalysis* loop_assignment,
+ TypeHintAnalysis* type_hint_analysis,
SourcePositionTable* source_positions)
- : AstGraphBuilder(local_zone, info, jsgraph, loop_assignment),
- source_positions_(source_positions) {}
+ : AstGraphBuilder(local_zone, info, jsgraph, loop_assignment,
+ type_hint_analysis),
+ source_positions_(source_positions),
+ start_position_(info->shared_info()->start_position()) {}
- bool CreateGraph() {
- SourcePositionTable::Scope pos(source_positions_,
- SourcePosition::Unknown());
- return AstGraphBuilder::CreateGraph();
+ bool CreateGraph(bool stack_check) {
+ SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
+ return AstGraphBuilder::CreateGraph(stack_check);
}
#define DEF_VISIT(type) \
- void Visit##type(type* node) OVERRIDE { \
+ void Visit##type(type* node) override { \
SourcePositionTable::Scope pos(source_positions_, \
SourcePosition(node->position())); \
AstGraphBuilder::Visit##type(node); \
@@ -311,13 +396,55 @@
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
- Node* GetFunctionContext() { return AstGraphBuilder::GetFunctionContext(); }
+ private:
+ SourcePositionTable* const source_positions_;
+ SourcePosition const start_position_;
+};
+
+
+class SourcePositionWrapper final : public Reducer {
+ public:
+ SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
+ : reducer_(reducer), table_(table) {}
+ ~SourcePositionWrapper() final {}
+
+ Reduction Reduce(Node* node) final {
+ SourcePosition const pos = table_->GetSourcePosition(node);
+ SourcePositionTable::Scope position(table_, pos);
+ return reducer_->Reduce(node);
+ }
+
+ void Finalize() final { reducer_->Finalize(); }
private:
- SourcePositionTable* source_positions_;
+ Reducer* const reducer_;
+ SourcePositionTable* const table_;
+
+ DISALLOW_COPY_AND_ASSIGN(SourcePositionWrapper);
};
+class JSGraphReducer final : public GraphReducer {
+ public:
+ JSGraphReducer(JSGraph* jsgraph, Zone* zone)
+ : GraphReducer(zone, jsgraph->graph(), jsgraph->Dead()) {}
+ ~JSGraphReducer() final {}
+};
+
+
+void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
+ Reducer* reducer) {
+ if (data->info()->is_source_positions_enabled()) {
+ void* const buffer = data->graph_zone()->New(sizeof(SourcePositionWrapper));
+ SourcePositionWrapper* const wrapper =
+ new (buffer) SourcePositionWrapper(reducer, data->source_positions());
+ graph_reducer->AddReducer(wrapper);
+ } else {
+ graph_reducer->AddReducer(reducer);
+ }
+}
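
Note: AddReducer above is a small decorator hook: when source positions are
enabled, each reducer is wrapped so that every Reduce() call first enters a
source-position scope and then delegates. The shape of that wrapper in
miniature, with illustrative classes rather than the V8 interfaces:

  #include <iostream>

  struct Reducer {
    virtual ~Reducer() = default;
    virtual void Reduce(int node) = 0;
  };

  struct NodeLogger : Reducer {
    void Reduce(int node) override { std::cout << "reduce " << node << "\n"; }
  };

  // Wraps another reducer; brackets each Reduce call with extra bookkeeping.
  struct ScopedReducer : Reducer {
    explicit ScopedReducer(Reducer* inner) : inner_(inner) {}
    void Reduce(int node) override {
      std::cout << "enter scope for node " << node << "\n";  // e.g. set position
      inner_->Reduce(node);  // delegate to the wrapped reducer
    }
    Reducer* inner_;
  };

  int main() {
    NodeLogger base;
    ScopedReducer wrapped(&base);
    wrapped.Reduce(42);
  }
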
+
+
class PipelineRunScope {
public:
PipelineRunScope(PipelineData* data, const char* phase_name)
@@ -333,6 +460,8 @@
ZonePool::Scope zone_scope_;
};
+} // namespace
+
template <typename Phase>
void Pipeline::Run() {
@@ -361,33 +490,39 @@
};
-struct GraphBuilderPhase {
- static const char* phase_name() { return "graph builder"; }
+struct TypeHintAnalysisPhase {
+ static const char* phase_name() { return "type hint analysis"; }
void Run(PipelineData* data, Zone* temp_zone) {
- AstGraphBuilderWithPositions graph_builder(
- temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
- data->source_positions());
- if (graph_builder.CreateGraph()) {
- data->set_context_node(graph_builder.GetFunctionContext());
- } else {
- data->set_compilation_failed();
- }
+ TypeHintAnalyzer analyzer(data->graph_zone());
+ Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
+ TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
+ data->set_type_hint_analysis(type_hint_analysis);
}
};
-struct ContextSpecializerPhase {
- static const char* phase_name() { return "context specializing"; }
+struct GraphBuilderPhase {
+ static const char* phase_name() { return "graph builder"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- JSContextSpecializer spec(data->info(), data->jsgraph(),
- data->context_node());
- GraphReducer graph_reducer(data->graph(), temp_zone);
- graph_reducer.AddReducer(&spec);
- graph_reducer.ReduceGraph();
+ bool stack_check = !data->info()->IsStub();
+ bool succeeded = false;
+
+ if (data->info()->shared_info()->HasBytecodeArray()) {
+ BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
+ data->jsgraph());
+ succeeded = graph_builder.CreateGraph(stack_check);
+ } else {
+ AstGraphBuilderWithPositions graph_builder(
+ temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
+ data->type_hint_analysis(), data->source_positions());
+ succeeded = graph_builder.CreateGraph(stack_check);
+ }
+
+ if (!succeeded) {
+ data->set_compilation_failed();
+ }
}
};
@@ -396,10 +531,51 @@
static const char* phase_name() { return "inlining"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- JSInliner inliner(temp_zone, data->info(), data->jsgraph());
- inliner.Inline();
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ JSCallReducer call_reducer(data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSCallReducer::kDeoptimizationEnabled
+ : JSCallReducer::kNoFlags,
+ data->native_context());
+ JSContextSpecialization context_specialization(
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_function_context_specializing()
+ ? data->info()->context()
+ : MaybeHandle<Context>());
+ JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
+ data->jsgraph());
+ JSGlobalObjectSpecialization global_object_specialization(
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSGlobalObjectSpecialization::kDeoptimizationEnabled
+ : JSGlobalObjectSpecialization::kNoFlags,
+ data->native_context(), data->info()->dependencies());
+ JSNativeContextSpecialization native_context_specialization(
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSNativeContextSpecialization::kDeoptimizationEnabled
+ : JSNativeContextSpecialization::kNoFlags,
+ data->native_context(), data->info()->dependencies(), temp_zone);
+ JSInliningHeuristic inlining(&graph_reducer,
+ data->info()->is_inlining_enabled()
+ ? JSInliningHeuristic::kGeneralInlining
+ : JSInliningHeuristic::kRestrictedInlining,
+ temp_zone, data->info(), data->jsgraph());
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ if (data->info()->is_frame_specializing()) {
+ AddReducer(data, &graph_reducer, &frame_specialization);
+ }
+ AddReducer(data, &graph_reducer, &global_object_specialization);
+ AddReducer(data, &graph_reducer, &native_context_specialization);
+ AddReducer(data, &graph_reducer, &context_specialization);
+ AddReducer(data, &graph_reducer, &call_reducer);
+ AddReducer(data, &graph_reducer, &inlining);
+ graph_reducer.ReduceGraph();
}
};
@@ -407,7 +583,21 @@
struct TyperPhase {
static const char* phase_name() { return "typer"; }
- void Run(PipelineData* data, Zone* temp_zone) { data->typer()->Run(); }
+ void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ typer->Run(roots);
+ }
+};
+
+
+struct OsrDeconstructionPhase {
+ static const char* phase_name() { return "OSR deconstruction"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ OsrHelper osr_helper(data->info());
+ osr_helper.Deconstruct(data->jsgraph(), data->common(), temp_zone);
+ }
};
@@ -415,21 +605,66 @@
static const char* phase_name() { return "typed lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- ValueNumberingReducer vn_reducer(temp_zone);
- LoadElimination load_elimination;
- JSBuiltinReducer builtin_reducer(data->jsgraph());
- JSTypedLowering typed_lowering(data->jsgraph(), temp_zone);
- SimplifiedOperatorReducer simple_reducer(data->jsgraph());
- CommonOperatorReducer common_reducer;
- GraphReducer graph_reducer(data->graph(), temp_zone);
- graph_reducer.AddReducer(&vn_reducer);
- graph_reducer.AddReducer(&builtin_reducer);
- graph_reducer.AddReducer(&typed_lowering);
- graph_reducer.AddReducer(&load_elimination);
- graph_reducer.AddReducer(&simple_reducer);
- graph_reducer.AddReducer(&common_reducer);
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ LoadElimination load_elimination(&graph_reducer);
+ JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
+ JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
+ if (data->info()->is_deoptimization_enabled()) {
+ typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
+ }
+ if (data->info()->shared_info()->HasBytecodeArray()) {
+ typed_lowering_flags |= JSTypedLowering::kDisableBinaryOpReduction;
+ }
+ JSTypedLowering typed_lowering(&graph_reducer, data->info()->dependencies(),
+ typed_lowering_flags, data->jsgraph(),
+ temp_zone);
+ JSIntrinsicLowering intrinsic_lowering(
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSIntrinsicLowering::kDeoptimizationEnabled
+ : JSIntrinsicLowering::kDeoptimizationDisabled);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &builtin_reducer);
+ AddReducer(data, &graph_reducer, &typed_lowering);
+ AddReducer(data, &graph_reducer, &intrinsic_lowering);
+ AddReducer(data, &graph_reducer, &load_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+
+struct BranchEliminationPhase {
+ static const char* phase_name() { return "branch condition elimination"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ BranchElimination branch_condition_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ AddReducer(data, &graph_reducer, &branch_condition_elimination);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+
+struct EscapeAnalysisPhase {
+ static const char* phase_name() { return "escape analysis"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ EscapeAnalysis escape_analysis(data->graph(), data->jsgraph()->common(),
+ temp_zone);
+ escape_analysis.Run();
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
+ &escape_analysis, temp_zone);
+ AddReducer(data, &graph_reducer, &escape_reducer);
graph_reducer.ReduceGraph();
}
};
@@ -439,63 +674,96 @@
static const char* phase_name() { return "simplified lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- SimplifiedLowering lowering(data->jsgraph(), temp_zone);
+ SimplifiedLowering lowering(data->jsgraph(), temp_zone,
+ data->source_positions());
lowering.LowerAllNodes();
- ValueNumberingReducer vn_reducer(temp_zone);
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+ ValueNumberingReducer value_numbering(temp_zone);
MachineOperatorReducer machine_reducer(data->jsgraph());
- CommonOperatorReducer common_reducer;
- GraphReducer graph_reducer(data->graph(), temp_zone);
- graph_reducer.AddReducer(&vn_reducer);
- graph_reducer.AddReducer(&simple_reducer);
- graph_reducer.AddReducer(&machine_reducer);
- graph_reducer.AddReducer(&common_reducer);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &simple_reducer);
+ AddReducer(data, &graph_reducer, &value_numbering);
+ AddReducer(data, &graph_reducer, &machine_reducer);
+ AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
}
};
+struct ControlFlowOptimizationPhase {
+ static const char* phase_name() { return "control flow optimization"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ ControlFlowOptimizer optimizer(data->graph(), data->common(),
+ data->machine(), temp_zone);
+ optimizer.Optimize();
+ }
+};
+
+
struct ChangeLoweringPhase {
static const char* phase_name() { return "change lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- Linkage linkage(data->graph_zone(), data->info());
- ValueNumberingReducer vn_reducer(temp_zone);
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
- ChangeLowering lowering(data->jsgraph(), &linkage);
+ ValueNumberingReducer value_numbering(temp_zone);
+ ChangeLowering lowering(data->jsgraph());
MachineOperatorReducer machine_reducer(data->jsgraph());
- CommonOperatorReducer common_reducer;
- GraphReducer graph_reducer(data->graph(), temp_zone);
- graph_reducer.AddReducer(&vn_reducer);
- graph_reducer.AddReducer(&simple_reducer);
- graph_reducer.AddReducer(&lowering);
- graph_reducer.AddReducer(&machine_reducer);
- graph_reducer.AddReducer(&common_reducer);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &simple_reducer);
+ AddReducer(data, &graph_reducer, &value_numbering);
+ AddReducer(data, &graph_reducer, &lowering);
+ AddReducer(data, &graph_reducer, &machine_reducer);
+ AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
}
};
-struct ControlReductionPhase {
+struct EarlyGraphTrimmingPhase {
+ static const char* phase_name() { return "early graph trimming"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- ControlReducer::ReduceGraph(temp_zone, data->jsgraph(), data->common());
+ GraphTrimmer trimmer(temp_zone, data->graph());
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
}
};
-struct EarlyControlReductionPhase : ControlReductionPhase {
- static const char* phase_name() { return "early control reduction"; }
+struct LateGraphTrimmingPhase {
+ static const char* phase_name() { return "late graph trimming"; }
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphTrimmer trimmer(temp_zone, data->graph());
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+ }
};
-struct LateControlReductionPhase : ControlReductionPhase {
- static const char* phase_name() { return "late control reduction"; }
+struct StressLoopPeelingPhase {
+ static const char* phase_name() { return "stress loop peeling"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ // Peel the first outer loop for testing.
+ // TODO(titzer): Peel all loops? The N'th loop? Innermost loops?
+ LoopTree* loop_tree = LoopFinder::BuildLoopTree(data->graph(), temp_zone);
+ if (loop_tree != nullptr && loop_tree->outer_loops().size() > 0) {
+ LoopPeeler::Peel(data->graph(), data->common(), loop_tree,
+ loop_tree->outer_loops()[0], temp_zone);
+ }
+ }
};
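// Loop peeling, for reference, copies the first iteration of a loop out in
// front of it so later phases can specialize that copy. Schematically:
//
//   while (c) { body; }   ==>   if (c) { body; while (c) { body; } }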
@@ -503,13 +771,23 @@
static const char* phase_name() { return "generic lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SourcePositionTable::Scope pos(data->source_positions(),
- SourcePosition::Unknown());
- JSGenericLowering generic(data->info(), data->jsgraph());
- SelectLowering select(data->jsgraph()->graph(), data->jsgraph()->common());
- GraphReducer graph_reducer(data->graph(), temp_zone);
- graph_reducer.AddReducer(&generic);
- graph_reducer.AddReducer(&select);
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ JSContextRelaxation context_relaxing;
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ JSGenericLowering generic_lowering(data->info()->is_typing_enabled(),
+ data->jsgraph());
+ SelectLowering select_lowering(data->jsgraph()->graph(),
+ data->jsgraph()->common());
+ TailCallOptimization tco(data->common(), data->graph());
+ AddReducer(data, &graph_reducer, &context_relaxing);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &generic_lowering);
+ AddReducer(data, &graph_reducer, &select_lowering);
+ AddReducer(data, &graph_reducer, &tco);
graph_reducer.ReduceGraph();
}
};
@@ -519,9 +797,11 @@
static const char* phase_name() { return "scheduling"; }
void Run(PipelineData* data, Zone* temp_zone) {
- Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph());
- TraceSchedule(schedule);
- if (VerifyGraphs()) ScheduleVerifier::Run(schedule);
+ Schedule* schedule = Scheduler::ComputeSchedule(
+ temp_zone, data->graph(), data->info()->is_splitting_enabled()
+ ? Scheduler::kSplitNodes
+ : Scheduler::kNoFlags);
+ if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
data->set_schedule(schedule);
}
};
@@ -531,9 +811,12 @@
static const char* phase_name() { return "select instructions"; }
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
- InstructionSelector selector(temp_zone, data->graph(), linkage,
- data->sequence(), data->schedule(),
- data->source_positions());
+ InstructionSelector selector(
+ temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
+ data->schedule(), data->source_positions(),
+ data->info()->is_source_positions_enabled()
+ ? InstructionSelector::kAllSourcePositions
+ : InstructionSelector::kCallSourcePositions);
selector.SelectInstructions();
}
};
@@ -543,7 +826,8 @@
static const char* phase_name() { return "meet register constraints"; }
void Run(PipelineData* data, Zone* temp_zone) {
- data->register_allocator()->MeetRegisterConstraints();
+ ConstraintBuilder builder(data->register_allocation_data());
+ builder.MeetRegisterConstraints();
}
};
@@ -552,7 +836,8 @@
static const char* phase_name() { return "resolve phis"; }
void Run(PipelineData* data, Zone* temp_zone) {
- data->register_allocator()->ResolvePhis();
+ ConstraintBuilder builder(data->register_allocation_data());
+ builder.ResolvePhis();
}
};
@@ -561,34 +846,73 @@
static const char* phase_name() { return "build live ranges"; }
void Run(PipelineData* data, Zone* temp_zone) {
- data->register_allocator()->BuildLiveRanges();
+ LiveRangeBuilder builder(data->register_allocation_data(), temp_zone);
+ builder.BuildLiveRanges();
}
};
+struct SplinterLiveRangesPhase {
+ static const char* phase_name() { return "splinter live ranges"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ LiveRangeSeparator live_range_splinterer(data->register_allocation_data(),
+ temp_zone);
+ live_range_splinterer.Splinter();
+ }
+};
+
+
+template <typename RegAllocator>
struct AllocateGeneralRegistersPhase {
static const char* phase_name() { return "allocate general registers"; }
void Run(PipelineData* data, Zone* temp_zone) {
- data->register_allocator()->AllocateGeneralRegisters();
+ RegAllocator allocator(data->register_allocation_data(), GENERAL_REGISTERS,
+ temp_zone);
+ allocator.AllocateRegisters();
}
};
+template <typename RegAllocator>
struct AllocateDoubleRegistersPhase {
static const char* phase_name() { return "allocate double registers"; }
void Run(PipelineData* data, Zone* temp_zone) {
- data->register_allocator()->AllocateDoubleRegisters();
+ RegAllocator allocator(data->register_allocation_data(), DOUBLE_REGISTERS,
+ temp_zone);
+ allocator.AllocateRegisters();
}
};
-struct ReuseSpillSlotsPhase {
- static const char* phase_name() { return "reuse spill slots"; }
+struct MergeSplintersPhase {
+ static const char* phase_name() { return "merge splintered ranges"; }
+ void Run(PipelineData* pipeline_data, Zone* temp_zone) {
+ RegisterAllocationData* data = pipeline_data->register_allocation_data();
+ LiveRangeMerger live_range_merger(data, temp_zone);
+ live_range_merger.Merge();
+ }
+};
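// SplinterLiveRangesPhase and MergeSplintersPhase bracket the allocator
// phases above: live ranges that cross deferred (rarely executed) blocks
// are split off first, allocated on their own, and stitched back together
// afterwards, so cold code does not inflate register pressure in hot code.
// Schematically (summary inferred from the phase bodies):
//
//   [---- hot ----][ deferred ][---- hot ----]
//                  \_ splintered, allocated separately, re-merged _/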
+
+
+struct LocateSpillSlotsPhase {
+ static const char* phase_name() { return "locate spill slots"; }
void Run(PipelineData* data, Zone* temp_zone) {
- data->register_allocator()->ReuseSpillSlots();
+ SpillSlotLocator locator(data->register_allocation_data());
+ locator.LocateSpillSlots();
+ }
+};
+
+
+struct AssignSpillSlotsPhase {
+ static const char* phase_name() { return "assign spill slots"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ OperandAssigner assigner(data->register_allocation_data());
+ assigner.AssignSpillSlots();
}
};
@@ -597,16 +921,18 @@
static const char* phase_name() { return "commit assignment"; }
void Run(PipelineData* data, Zone* temp_zone) {
- data->register_allocator()->CommitAssignment();
+ OperandAssigner assigner(data->register_allocation_data());
+ assigner.CommitAssignment();
}
};
-struct PopulatePointerMapsPhase {
+struct PopulateReferenceMapsPhase {
static const char* phase_name() { return "populate pointer maps"; }
void Run(PipelineData* data, Zone* temp_zone) {
- data->register_allocator()->PopulatePointerMaps();
+ ReferenceMapPopulator populator(data->register_allocation_data());
+ populator.PopulateReferenceMaps();
}
};
@@ -615,7 +941,8 @@
static const char* phase_name() { return "connect ranges"; }
void Run(PipelineData* data, Zone* temp_zone) {
- data->register_allocator()->ConnectRanges();
+ LiveRangeConnector connector(data->register_allocation_data());
+ connector.ConnectRanges(temp_zone);
}
};
@@ -624,7 +951,8 @@
static const char* phase_name() { return "resolve control flow"; }
void Run(PipelineData* data, Zone* temp_zone) {
- data->register_allocator()->ResolveControlFlow();
+ LiveRangeConnector connector(data->register_allocation_data());
+ connector.ResolveControlFlow(temp_zone);
}
};
@@ -639,11 +967,20 @@
};
+struct FrameElisionPhase {
+ static const char* phase_name() { return "frame elision"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ FrameElider(data->sequence()).Run();
+ }
+};
+
+
struct JumpThreadingPhase {
static const char* phase_name() { return "jump threading"; }
void Run(PipelineData* data, Zone* temp_zone) {
- ZoneVector<BasicBlock::RpoNumber> result(temp_zone);
+ ZoneVector<RpoNumber> result(temp_zone);
if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence())) {
JumpThreading::ApplyForwarding(result, data->sequence());
}
@@ -668,52 +1005,21 @@
void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
CompilationInfo* info = data->info();
Graph* graph = data->graph();
- char buffer[256];
- Vector<char> filename(buffer, sizeof(buffer));
- SmartArrayPointer<char> functionname;
- if (!info->shared_info().is_null()) {
- functionname = info->shared_info()->DebugName()->ToCString();
- if (strlen(functionname.get()) > 0) {
- SNPrintF(filename, "turbo-%s-%s", functionname.get(), phase);
- } else {
- SNPrintF(filename, "turbo-%p-%s", static_cast<void*>(info), phase);
- }
- } else {
- SNPrintF(filename, "turbo-none-%s", phase);
- }
- std::replace(filename.start(), filename.start() + filename.length(), ' ',
- '_');
-
- { // Print dot.
- char dot_buffer[256];
- Vector<char> dot_filename(dot_buffer, sizeof(dot_buffer));
- SNPrintF(dot_filename, "%s.dot", filename.start());
- FILE* dot_file = base::OS::FOpen(dot_filename.start(), "w+");
- if (dot_file == nullptr) return;
- OFStream dot_of(dot_file);
- dot_of << AsDOT(*graph);
- fclose(dot_file);
- }
{ // Print JSON.
- char json_buffer[256];
- Vector<char> json_filename(json_buffer, sizeof(json_buffer));
- SNPrintF(json_filename, "%s.json", filename.start());
- FILE* json_file = base::OS::FOpen(json_filename.start(), "w+");
+ FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
if (json_file == nullptr) return;
OFStream json_of(json_file);
- json_of << AsJSON(*graph);
+ json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
+ << AsJSON(*graph, data->source_positions()) << "},\n";
fclose(json_file);
}
- OFStream os(stdout);
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
os << "-- Graph after " << phase << " -- " << std::endl;
os << AsRPO(*graph);
}
-
- os << "-- " << phase << " graph printed to file " << filename.start()
- << std::endl;
}
};
@@ -730,7 +1036,7 @@
void Pipeline::BeginPhaseKind(const char* phase_kind_name) {
- if (data_->pipeline_statistics() != NULL) {
+ if (data_->pipeline_statistics() != nullptr) {
data_->pipeline_statistics()->BeginPhaseKind(phase_kind_name);
}
}
@@ -740,45 +1046,62 @@
if (FLAG_trace_turbo) {
Run<PrintGraphPhase>(phase);
}
- if (VerifyGraphs()) {
+ if (FLAG_turbo_verify) {
Run<VerifyGraphPhase>(untyped);
}
}
Handle<Code> Pipeline::GenerateCode() {
- // This list must be kept in sync with DONT_TURBOFAN_NODE in ast.cc.
- if (info()->function()->dont_optimize_reason() == kTryCatchStatement ||
- info()->function()->dont_optimize_reason() == kTryFinallyStatement ||
- // TODO(turbofan): Make ES6 for-of work and remove this bailout.
- info()->function()->dont_optimize_reason() == kForOfStatement ||
- // TODO(turbofan): Make super work and remove this bailout.
- info()->function()->dont_optimize_reason() == kSuperReference ||
- // TODO(turbofan): Make class literals work and remove this bailout.
- info()->function()->dont_optimize_reason() == kClassLiteral ||
- // TODO(turbofan): Make OSR work and remove this bailout.
- info()->is_osr()) {
+ // TODO(mstarzinger): This is just a temporary hack to make TurboFan work;
+ // the correct solution is to restore the context register after invoking
+ // builtins from full-codegen.
+ if (Context::IsJSBuiltin(isolate()->native_context(), info()->closure())) {
return Handle<Code>::null();
}
- ZonePool zone_pool(isolate());
- SmartPointer<PipelineStatistics> pipeline_statistics;
+ ZonePool zone_pool;
+ base::SmartPointer<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats) {
pipeline_statistics.Reset(new PipelineStatistics(info(), &zone_pool));
pipeline_statistics->BeginPhaseKind("initializing");
}
- PipelineData data(&zone_pool, info());
+ if (FLAG_trace_turbo) {
+ FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "w+");
+ if (json_file != nullptr) {
+ OFStream json_of(json_file);
+ Handle<Script> script = info()->script();
+ FunctionLiteral* function = info()->literal();
+ base::SmartArrayPointer<char> function_name = info()->GetDebugName();
+ int pos = info()->shared_info()->start_position();
+ json_of << "{\"function\":\"" << function_name.get()
+ << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ DisallowHeapAllocation no_allocation;
+ int start = function->start_position();
+ int len = function->end_position() - start;
+ String::SubStringRange source(String::cast(script->source()), start,
+ len);
+ for (const auto& c : source) {
+ json_of << AsEscapedUC16ForJSON(c);
+ }
+ }
+ json_of << "\",\n\"phases\":[";
+ fclose(json_file);
+ }
+ }
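// Sketch of the overall --trace-turbo JSON document, pieced together from
// this block, PrintGraphPhase, and the disassembly writer in
// ScheduleAndGenerateCode (payloads elided):
//
//   {"function":"<name>", "sourcePosition":<pos>, "source":"<escaped src>",
//    "phases":[
//      {"name":"<phase>","type":"graph","data":{...}},
//      ...
//      {"name":"disassembly","type":"disassembly","data":"..."}
//    ],
//    "nodePositions": <source position table>}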
+
+ PipelineData data(&zone_pool, info(), pipeline_statistics.get());
this->data_ = &data;
- data.Initialize(pipeline_statistics.get());
BeginPhaseKind("graph creation");
if (FLAG_trace_turbo) {
OFStream os(stdout);
os << "---------------------------------------------------\n"
- << "Begin compiling method " << GetDebugName(info()).get()
+ << "Begin compiling method " << info()->GetDebugName().get()
<< " using Turbofan" << std::endl;
TurboCfgFile tcf(isolate());
tcf << AsC1VCompilation(info());
@@ -790,35 +1113,42 @@
Run<LoopAssignmentAnalysisPhase>();
}
+ if (info()->is_typing_enabled()) {
+ Run<TypeHintAnalysisPhase>();
+ }
+
Run<GraphBuilderPhase>();
if (data.compilation_failed()) return Handle<Code>::null();
RunPrintAndVerify("Initial untyped", true);
- Run<EarlyControlReductionPhase>();
- RunPrintAndVerify("Early Control reduced", true);
-
- if (info()->is_context_specializing()) {
- // Specialize the code to the context as aggressively as possible.
- Run<ContextSpecializerPhase>();
- RunPrintAndVerify("Context specialized", true);
+ // Perform OSR deconstruction.
+ if (info()->is_osr()) {
+ Run<OsrDeconstructionPhase>();
+ RunPrintAndVerify("OSR deconstruction", true);
}
- if (info()->is_inlining_enabled()) {
- Run<InliningPhase>();
- RunPrintAndVerify("Inlined", true);
- }
+ // Perform function context specialization and inlining (if enabled).
+ Run<InliningPhase>();
+ RunPrintAndVerify("Inlined", true);
+
+ // Remove dead->live edges from the graph.
+ Run<EarlyGraphTrimmingPhase>();
+ RunPrintAndVerify("Early trimmed", true);
if (FLAG_print_turbo_replay) {
// Print a replay of the initial graph.
GraphReplayPrinter::PrintReplay(data.graph());
}
- // Bailout here in case target architecture is not supported.
- if (!SupportedTarget()) return Handle<Code>::null();
-
+ base::SmartPointer<Typer> typer;
if (info()->is_typing_enabled()) {
// Type the graph.
- Run<TyperPhase>();
+ typer.Reset(new Typer(isolate(), data.graph(),
+ info()->is_deoptimization_enabled()
+ ? Typer::kDeoptimizationEnabled
+ : Typer::kNoFlags,
+ info()->dependencies()));
+ Run<TyperPhase>(typer.get());
RunPrintAndVerify("Typed");
}
@@ -829,17 +1159,33 @@
Run<TypedLoweringPhase>();
RunPrintAndVerify("Lowered typed");
+ if (FLAG_turbo_stress_loop_peeling) {
+ Run<StressLoopPeelingPhase>();
+ RunPrintAndVerify("Loop peeled");
+ }
+
+ if (FLAG_turbo_escape) {
+ Run<EscapeAnalysisPhase>();
+ RunPrintAndVerify("Escape Analysed");
+ }
+
// Lower simplified operators and insert changes.
Run<SimplifiedLoweringPhase>();
RunPrintAndVerify("Lowered simplified");
+ Run<BranchEliminationPhase>();
+ RunPrintAndVerify("Branch conditions eliminated");
+
+ // Optimize control flow.
+ if (FLAG_turbo_cf_optimization) {
+ Run<ControlFlowOptimizationPhase>();
+ RunPrintAndVerify("Control flow optimized");
+ }
+
// Lower changes that have been inserted before.
Run<ChangeLoweringPhase>();
- // // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
+ // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Lowered changes", true);
-
- Run<LateControlReductionPhase>();
- RunPrintAndVerify("Late Control reduced");
}
// Lower any remaining generic JSOperators.
@@ -847,32 +1193,55 @@
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Lowered generic", true);
+ Run<LateGraphTrimmingPhase>();
+ // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
+ RunPrintAndVerify("Late trimmed", true);
+
BeginPhaseKind("block building");
data.source_positions()->RemoveDecorator();
- // Compute a schedule.
- Run<ComputeSchedulePhase>();
+ // Kill the Typer and thereby uninstall the decorator (if any).
+ typer.Reset(nullptr);
- {
- // Generate optimized code.
- Linkage linkage(data.instruction_zone(), info());
- GenerateCode(&linkage);
+ return ScheduleAndGenerateCode(
+ Linkage::ComputeIncoming(data.instruction_zone(), info()));
+}
+
+
+Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
+ CallDescriptor* call_descriptor,
+ Graph* graph, Schedule* schedule,
+ Code::Kind kind,
+ const char* debug_name) {
+ CompilationInfo info(debug_name, isolate, graph->zone());
+ info.set_output_code_kind(kind);
+
+ // Construct a pipeline for scheduling and code generation.
+ ZonePool zone_pool;
+ PipelineData data(&zone_pool, &info, graph, schedule);
+ base::SmartPointer<PipelineStatistics> pipeline_statistics;
+ if (FLAG_turbo_stats) {
+ pipeline_statistics.Reset(new PipelineStatistics(&info, &zone_pool));
+ pipeline_statistics->BeginPhaseKind("stub codegen");
}
- Handle<Code> code = data.code();
- info()->SetCode(code);
- // Print optimized code.
- v8::internal::CodeGenerator::PrintCode(code, info());
+ Pipeline pipeline(&info);
+ pipeline.data_ = &data;
+ DCHECK_NOT_NULL(data.schedule());
if (FLAG_trace_turbo) {
- OFStream os(stdout);
- os << "---------------------------------------------------\n"
- << "Finished compiling method " << GetDebugName(info()).get()
- << " using Turbofan" << std::endl;
+ FILE* json_file = OpenVisualizerLogFile(&info, nullptr, "json", "w+");
+ if (json_file != nullptr) {
+ OFStream json_of(json_file);
+ json_of << "{\"function\":\"" << info.GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
+ fclose(json_file);
+ }
+ pipeline.Run<PrintGraphPhase>("Machine");
}
- return code;
+ return pipeline.ScheduleAndGenerateCode(call_descriptor);
}
@@ -885,70 +1254,53 @@
}
-Handle<Code> Pipeline::GenerateCodeForTesting(CallDescriptor* call_descriptor,
- Graph* graph,
- Schedule* schedule) {
- CompilationInfo info(graph->zone()->isolate(), graph->zone());
- return GenerateCodeForTesting(&info, call_descriptor, graph, schedule);
-}
-
-
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
CallDescriptor* call_descriptor,
Graph* graph,
Schedule* schedule) {
- CHECK(SupportedBackend());
- ZonePool zone_pool(info->isolate());
+ // Construct a pipeline for scheduling and code generation.
+ ZonePool zone_pool;
+ PipelineData data(&zone_pool, info, graph, schedule);
+ base::SmartPointer<PipelineStatistics> pipeline_statistics;
+ if (FLAG_turbo_stats) {
+ pipeline_statistics.Reset(new PipelineStatistics(info, &zone_pool));
+ pipeline_statistics->BeginPhaseKind("test codegen");
+ }
+
Pipeline pipeline(info);
- PipelineData data(&zone_pool, info);
pipeline.data_ = &data;
- data.InitializeTorTesting(graph, schedule);
- if (schedule == NULL) {
+ if (data.schedule() == nullptr) {
// TODO(rossberg): Should this really be untyped?
pipeline.RunPrintAndVerify("Machine", true);
- pipeline.Run<ComputeSchedulePhase>();
- } else {
- TraceSchedule(schedule);
}
- Linkage linkage(info->zone(), call_descriptor);
- pipeline.GenerateCode(&linkage);
- Handle<Code> code = data.code();
-
-#if ENABLE_DISASSEMBLER
- if (!code.is_null() && FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble("test code", os);
- }
-#endif
- return code;
+ return pipeline.ScheduleAndGenerateCode(call_descriptor);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool run_verifier) {
- CompilationInfo info(sequence->zone()->isolate(), sequence->zone());
- ZonePool zone_pool(sequence->zone()->isolate());
- PipelineData data(&zone_pool, &info);
- data.InitializeTorTesting(sequence);
+ CompilationInfo info("testing", sequence->isolate(), sequence->zone());
+ ZonePool zone_pool;
+ PipelineData data(&zone_pool, &info, sequence);
Pipeline pipeline(&info);
pipeline.data_ = &data;
- pipeline.AllocateRegisters(config, run_verifier);
+ pipeline.AllocateRegisters(config, nullptr, run_verifier);
return !data.compilation_failed();
}
-void Pipeline::GenerateCode(Linkage* linkage) {
+Handle<Code> Pipeline::ScheduleAndGenerateCode(
+ CallDescriptor* call_descriptor) {
PipelineData* data = this->data_;
- DCHECK_NOT_NULL(linkage);
DCHECK_NOT_NULL(data->graph());
- DCHECK_NOT_NULL(data->schedule());
- CHECK(SupportedBackend());
- BasicBlockProfiler::Data* profiler_data = NULL;
+ if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
+ TraceSchedule(data->info(), data->schedule());
+
+ BasicBlockProfiler::Data* profiler_data = nullptr;
if (FLAG_turbo_profiling) {
profiler_data = BasicBlockInstrumentor::Instrument(info(), data->graph(),
data->schedule());
@@ -957,7 +1309,8 @@
data->InitializeInstructionSequence();
// Select and schedule instructions covering the scheduled graph.
- Run<InstructionSelectionPhase>(linkage);
+ Linkage linkage(call_descriptor);
+ Run<InstructionSelectionPhase>(&linkage);
if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(isolate());
@@ -965,19 +1318,24 @@
data->sequence());
}
+ std::ostringstream source_position_output;
+ if (FLAG_trace_turbo) {
+ // Output source position information before the graph is deleted.
+ data_->source_positions()->Print(source_position_output);
+ }
+
data->DeleteGraphZone();
BeginPhaseKind("register allocation");
- bool run_verifier = false;
-#ifdef DEBUG
- run_verifier = true;
-#endif
+ bool run_verifier = FLAG_turbo_verify_allocation;
// Allocate registers.
- AllocateRegisters(RegisterConfiguration::ArchDefault(), run_verifier);
+ AllocateRegisters(
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
+ call_descriptor, run_verifier);
if (data->compilation_failed()) {
info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
- return;
+ return Handle<Code>();
}
BeginPhaseKind("code generation");
@@ -988,45 +1346,74 @@
}
// Generate final machine code.
- Run<GenerateCodePhase>(linkage);
+ Run<GenerateCodePhase>(&linkage);
- if (profiler_data != NULL) {
+ Handle<Code> code = data->code();
+ if (profiler_data != nullptr) {
#if ENABLE_DISASSEMBLER
std::ostringstream os;
- data->code()->Disassemble(NULL, os);
+ code->Disassemble(nullptr, os);
profiler_data->SetCode(&os);
#endif
}
+
+ info()->SetCode(code);
+ v8::internal::CodeGenerator::PrintCode(code, info());
+
+ if (FLAG_trace_turbo) {
+ FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "a+");
+ if (json_file != nullptr) {
+ OFStream json_of(json_file);
+ json_of
+ << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
+#if ENABLE_DISASSEMBLER
+ std::stringstream disassembly_stream;
+ code->Disassemble(nullptr, disassembly_stream);
+ std::string disassembly_string(disassembly_stream.str());
+ for (const auto& c : disassembly_string) {
+ json_of << AsEscapedUC16ForJSON(c);
+ }
+#endif // ENABLE_DISASSEMBLER
+ json_of << "\"}\n],\n";
+ json_of << "\"nodePositions\":";
+ json_of << source_position_output.str();
+ json_of << "}";
+ fclose(json_file);
+ }
+ OFStream os(stdout);
+ os << "---------------------------------------------------\n"
+ << "Finished compiling method " << info()->GetDebugName().get()
+ << " using Turbofan" << std::endl;
+ }
+
+ return code;
}
void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
+ CallDescriptor* descriptor,
bool run_verifier) {
PipelineData* data = this->data_;
- int node_count = data->sequence()->VirtualRegisterCount();
- if (node_count > UnallocatedOperand::kMaxVirtualRegisters) {
- data->set_compilation_failed();
- return;
- }
-
// Don't track usage for this zone in compiler stats.
- SmartPointer<Zone> verifier_zone;
+ base::SmartPointer<Zone> verifier_zone;
RegisterAllocatorVerifier* verifier = nullptr;
if (run_verifier) {
- verifier_zone.Reset(new Zone(info()->isolate()));
+ verifier_zone.Reset(new Zone());
verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
verifier_zone.get(), config, data->sequence());
}
- SmartArrayPointer<char> debug_name;
+ base::SmartArrayPointer<char> debug_name;
#ifdef DEBUG
- debug_name = GetDebugName(data->info());
+ debug_name = info()->GetDebugName();
#endif
- ZonePool::Scope zone_scope(data->zone_pool());
- data->InitializeRegisterAllocator(zone_scope.zone(), config,
- debug_name.get());
+ data->InitializeRegisterAllocationData(config, descriptor, debug_name.get());
+ if (info()->is_osr()) {
+ OsrHelper osr_helper(info());
+ osr_helper.SetupFrame(data->frame());
+ }
Run<MeetRegisterConstraintsPhase>();
Run<ResolvePhisPhase>();
@@ -1038,23 +1425,36 @@
<< printable;
}
if (verifier != nullptr) {
- CHECK(!data->register_allocator()->ExistsUseWithoutDefinition());
+ CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
+ CHECK(data->register_allocation_data()
+ ->RangesDefinedInDeferredStayInDeferred());
}
- Run<AllocateGeneralRegistersPhase>();
- if (!data->register_allocator()->AllocationOk()) {
- data->set_compilation_failed();
- return;
+
+ if (FLAG_turbo_preprocess_ranges) {
+ Run<SplinterLiveRangesPhase>();
}
- Run<AllocateDoubleRegistersPhase>();
- if (!data->register_allocator()->AllocationOk()) {
- data->set_compilation_failed();
- return;
+
+ if (FLAG_turbo_greedy_regalloc) {
+ Run<AllocateGeneralRegistersPhase<GreedyAllocator>>();
+ Run<AllocateDoubleRegistersPhase<GreedyAllocator>>();
+ } else {
+ Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
+ Run<AllocateDoubleRegistersPhase<LinearScanAllocator>>();
}
- if (FLAG_turbo_reuse_spill_slots) {
- Run<ReuseSpillSlotsPhase>();
+
+ if (FLAG_turbo_preprocess_ranges) {
+ Run<MergeSplintersPhase>();
}
+
+ if (FLAG_turbo_frame_elision) {
+ Run<LocateSpillSlotsPhase>();
+ Run<FrameElisionPhase>();
+ }
+
+ Run<AssignSpillSlotsPhase>();
+
Run<CommitAssignmentPhase>();
- Run<PopulatePointerMapsPhase>();
+ Run<PopulateReferenceMapsPhase>();
Run<ConnectRangesPhase>();
Run<ResolveControlFlowPhase>();
if (FLAG_turbo_move_optimization) {
@@ -1075,18 +1475,11 @@
if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(data->isolate());
- tcf << AsC1VAllocator("CodeGen", data->register_allocator());
+ tcf << AsC1VRegisterAllocationData("CodeGen",
+ data->register_allocation_data());
}
-}
-
-void Pipeline::SetUp() {
- InstructionOperand::SetUpCaches();
-}
-
-
-void Pipeline::TearDown() {
- InstructionOperand::TearDownCaches();
+ data->DeleteRegisterAllocationZone();
}
} // namespace compiler
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
index 73053dc..af94018 100644
--- a/src/compiler/pipeline.h
+++ b/src/compiler/pipeline.h
@@ -5,24 +5,22 @@
#ifndef V8_COMPILER_PIPELINE_H_
#define V8_COMPILER_PIPELINE_H_
-#include "src/v8.h"
-
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
#include "src/compiler.h"
-// Note: TODO(turbofan) implies a performance improvement opportunity,
-// and TODO(name) implies an incomplete implementation
-
namespace v8 {
namespace internal {
+
+class RegisterConfiguration;
+
namespace compiler {
-// Clients of this interface shouldn't depend on lots of compiler internals.
class CallDescriptor;
class Graph;
class InstructionSequence;
class Linkage;
class PipelineData;
-class RegisterConfiguration;
class Schedule;
class Pipeline {
@@ -32,15 +30,17 @@
// Run the entire pipeline and generate a handle to a code object.
Handle<Code> GenerateCode();
- // Run the pipeline on a machine graph and generate code. If {schedule} is
- // {nullptr}, then compute a new schedule for code generation.
- static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
- Graph* graph,
- Schedule* schedule = nullptr);
+ // Run the pipeline on a machine graph and generate code. The {schedule} must
+ // be valid; hence the given {graph} does not need to be schedulable.
+ static Handle<Code> GenerateCodeForCodeStub(Isolate* isolate,
+ CallDescriptor* call_descriptor,
+ Graph* graph, Schedule* schedule,
+ Code::Kind kind,
+ const char* debug_name);
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
- static Handle<Code> GenerateCodeForTesting(CallDescriptor* call_descriptor,
+ static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
Graph* graph,
Schedule* schedule = nullptr);
@@ -49,17 +49,14 @@
InstructionSequence* sequence,
bool run_verifier);
- static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; }
- static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }
-
- static void SetUp();
- static void TearDown();
-
- private:
+ // Run the pipeline on a machine graph and generate code. If {schedule} is
+ // {nullptr}, then compute a new schedule for code generation.
static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
CallDescriptor* call_descriptor,
- Graph* graph, Schedule* schedule);
+ Graph* graph,
+ Schedule* schedule = nullptr);
+ private:
CompilationInfo* info_;
PipelineData* data_;
@@ -74,9 +71,9 @@
void BeginPhaseKind(const char* phase_kind);
void RunPrintAndVerify(const char* phase, bool untyped = false);
- void GenerateCode(Linkage* linkage);
+ Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
- bool run_verifier);
+ CallDescriptor* descriptor, bool run_verifier);
};
} // namespace compiler
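// Sketch of how a test client drives the interface above; the
// CompilationInfo constructor arguments mirror the ones used by
// AllocateRegistersForTesting in pipeline.cc and are otherwise an
// assumption:
//
//   CompilationInfo info("testing", isolate, graph->zone());
//   Handle<Code> code =
//       Pipeline::GenerateCodeForTesting(&info, graph);
//   // schedule == nullptr, so the pipeline computes one itself.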
diff --git a/src/compiler/ppc/OWNERS b/src/compiler/ppc/OWNERS
new file mode 100644
index 0000000..eb007cb
--- /dev/null
+++ b/src/compiler/ppc/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
new file mode 100644
index 0000000..154cd64
--- /dev/null
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -0,0 +1,1868 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/ast/scopes.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
+#include "src/ppc/macro-assembler-ppc.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+#define kScratchReg r11
+
+
+// Adds PPC-specific methods to convert InstructionOperands.
+class PPCOperandConverter final : public InstructionOperandConverter {
+ public:
+ PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ size_t OutputCount() { return instr_->OutputCount(); }
+
+ RCBit OutputRCBit() const {
+ switch (instr_->flags_mode()) {
+ case kFlags_branch:
+ case kFlags_set:
+ return SetRC;
+ case kFlags_none:
+ return LeaveRC;
+ }
+ UNREACHABLE();
+ return LeaveRC;
+ }
+
+ bool CompareLogical() const {
+ switch (instr_->flags_condition()) {
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+ }
+
+ Operand InputImmediate(size_t index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kFloat32:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ case Constant::kFloat64:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kInt64:
+#if V8_TARGET_ARCH_PPC64
+ return Operand(constant.ToInt64());
+#endif
+ case Constant::kExternalReference:
+ case Constant::kHeapObject:
+ case Constant::kRpoNumber:
+ break;
+ }
+ UNREACHABLE();
+ return Operand::Zero();
+ }
+
+ MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
+ const size_t index = *first_index;
+ *mode = AddressingModeField::decode(instr_->opcode());
+ switch (*mode) {
+ case kMode_None:
+ break;
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+ }
+ UNREACHABLE();
+ return MemOperand(r0);
+ }
+
+ MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
+ return MemoryOperand(mode, &first_index);
+ }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK_NOT_NULL(op);
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsRegister();
+}
+
+
+namespace {
+
+class OutOfLineLoadNAN32 final : public OutOfLineCode {
+ public:
+ OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final {
+ __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
+ kScratchReg);
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadNAN64 final : public OutOfLineCode {
+ public:
+ OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final {
+ __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
+ kScratchReg);
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadZero final : public OutOfLineCode {
+ public:
+ OutOfLineLoadZero(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final { __ li(result_, Operand::Zero()); }
+
+ private:
+ Register const result_;
+};
+
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ offset_(offset),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+ // and restore lr properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ add(scratch1_, object_, offset_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const offset_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
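// This out-of-line class pairs with kArchStoreWithWriteBarrier below: the
// inline fast path performs the store and a single page-flag check on the
// object, and only then branches here, where Smis and values on
// uninteresting pages are filtered out before falling back to the
// RecordWriteStub. Inline path, schematically:
//
//   StorePX(value, MemOperand(object, offset));
//   CheckPageFlag(object, kPointersFromHereAreInterestingMask, ne, ool);
//   // ool: skip Smi values, skip values on uninteresting pages,
//   //      else RecordWriteStub on (object, object + offset).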
+
+
+Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ case kUnsignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ case kUnsignedGreaterThan:
+ return gt;
+ case kOverflow:
+ // Overflow checked for add/sub only.
+ switch (op) {
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Add:
+ case kPPC_Sub:
+ return lt;
+#endif
+ case kPPC_AddWithOverflow32:
+ case kPPC_SubWithOverflow32:
+#if V8_TARGET_ARCH_PPC64
+ return ne;
+#else
+ return lt;
+#endif
+ default:
+ break;
+ }
+ break;
+ case kNotOverflow:
+ switch (op) {
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Add:
+ case kPPC_Sub:
+ return ge;
+#endif
+ case kPPC_AddWithOverflow32:
+ case kPPC_SubWithOverflow32:
+#if V8_TARGET_ARCH_PPC64
+ return eq;
+#else
+ return ge;
+#endif
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+} // namespace
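// How the kOverflow/kNotOverflow mappings above line up with the assembler
// macros further down (summary, not normative): the 64-bit kPPC_Add and
// kPPC_Sub paths use Add/SubAndCheckForOverflow, which leave the overflow
// indication in the CR such that lt means "overflowed"; the
// *WithOverflow32 opcodes on PPC64 instead run TestIfInt32 on the 64-bit
// result, so ne means the value no longer fits in 32 bits, while 32-bit
// targets fall back to the lt/ge encoding.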
+
+#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1), i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1)); \
+ } else { \
+ __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+ i.InputImmediate(1)); \
+ } \
+ } while (0)
+
+
+#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), i.OutputRCBit()); \
+ } else { \
+ __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+ i.InputImmediate(1), i.OutputRCBit()); \
+ } \
+ } while (0)
+
+
+#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), i.OutputRCBit()); \
+ } else { \
+ __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+ i.InputInt32(1), i.OutputRCBit()); \
+ } \
+ } while (0)
+
+
+#define ASSEMBLE_ADD_WITH_OVERFLOW() \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), kScratchReg, r0); \
+ } else { \
+ __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ i.InputInt32(1), kScratchReg, r0); \
+ } \
+ } while (0)
+
+
+#define ASSEMBLE_SUB_WITH_OVERFLOW() \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1), kScratchReg, r0); \
+ } else { \
+ __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+ -i.InputInt32(1), kScratchReg, r0); \
+ } \
+ } while (0)
+
+
+#if V8_TARGET_ARCH_PPC64
+#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_BINOP(add, addi); \
+ __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+ } while (0)
+
+
+#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_BINOP(sub, subi); \
+ __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+ } while (0)
+#else
+#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
+#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
+#endif
+
+
+#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
+ do { \
+ const CRegister cr = cr0; \
+ if (HasRegisterInput(instr, 1)) { \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr); \
+ } else { \
+ __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr); \
+ } \
+ } else { \
+ if (i.CompareLogical()) { \
+ __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
+ } else { \
+ __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
+ } \
+ } \
+ DCHECK_EQ(SetRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
+ do { \
+ const CRegister cr = cr0; \
+ __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
+ DCHECK_EQ(SetRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_MODULO(div_instr, mul_instr) \
+ do { \
+ const Register scratch = kScratchReg; \
+ __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1)); \
+ __ mul_instr(scratch, scratch, i.InputRegister(1)); \
+ __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \
+ i.OutputRCBit()); \
+ } while (0)
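// ASSEMBLE_MODULO derives the remainder from the quotient:
//
//   rem = lhs - (lhs / rhs) * rhs
//
// Since divw/divd truncate toward zero, the remainder takes the sign of
// the dividend, matching C semantics. Example: -7 % 2 gives quotient -3,
// product -6, remainder -7 - (-6) = -1.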
+
+
+#define ASSEMBLE_FLOAT_MODULO() \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
+ 0, 2); \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_FLOAT_MAX(scratch_reg) \
+ do { \
+ __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+ __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ } while (0)
+
+
+#define ASSEMBLE_FLOAT_MIN(scratch_reg) \
+ do { \
+ __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+ __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(1), \
+ i.InputDoubleRegister(0)); \
+ } while (0)
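// The float min/max macros lean on fsel, which selects its second operand
// when the first is >= 0.0 and its third otherwise:
//
//   max: t = x - y; fsel(dst, t, x, y)   // t >= 0 ? x : y
//   min: t = x - y; fsel(dst, t, y, x)   // t >= 0 ? y : x
//
// Caveat (not stated in the source): fsel treats NaN like "not >= 0", so
// these do not propagate NaNs per IEEE 754 min/max semantics.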
+
+
+#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx) \
+ do { \
+ DoubleRegister result = i.OutputDoubleRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Register result = i.OutputRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_STORE_FLOAT32() \
+ do { \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ DoubleRegister value = i.InputDoubleRegister(index); \
+ __ frsp(kScratchDoubleReg, value); \
+ if (mode == kMode_MRI) { \
+ __ stfs(kScratchDoubleReg, operand); \
+ } else { \
+ __ stfsx(kScratchDoubleReg, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_STORE_DOUBLE() \
+ do { \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ DoubleRegister value = i.InputDoubleRegister(index); \
+ if (mode == kMode_MRI) { \
+ __ stfd(value, operand); \
+ } else { \
+ __ stfdx(value, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Register value = i.InputRegister(index); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32 bits.
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \
+ do { \
+ DoubleRegister result = i.OutputDoubleRegister(); \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
+ __ bge(ool->entry()); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ __ bind(ool->exit()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32 bits.
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Register result = i.OutputRegister(); \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ auto ool = new (zone()) OutOfLineLoadZero(this, result); \
+ __ bge(ool->entry()); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ __ bind(ool->exit()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
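// Shared shape of the checked load/store macros in this group: sign-extend
// the offset, compare it (unsigned) against the length input, and branch
// around the memory access when out of bounds; loads jump to an
// out-of-line stub that materializes NaN or zero, stores simply skip.
// In pseudo-C:
//
//   if ((uintptr_t)offset >= (uintptr_t)length) result = NaN_or_zero;
//   else result = memory[base + offset];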
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32 bits.
+#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
+ do { \
+ Label done; \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ __ bge(&done); \
+ DoubleRegister value = i.InputDoubleRegister(3); \
+ __ frsp(kScratchDoubleReg, value); \
+ if (mode == kMode_MRI) { \
+ __ stfs(kScratchDoubleReg, operand); \
+ } else { \
+ __ stfsx(kScratchDoubleReg, operand); \
+ } \
+ __ bind(&done); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32 bits.
+#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
+ do { \
+ Label done; \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ __ bge(&done); \
+ DoubleRegister value = i.InputDoubleRegister(3); \
+ if (mode == kMode_MRI) { \
+ __ stfd(value, operand); \
+ } else { \
+ __ stfdx(value, operand); \
+ } \
+ __ bind(&done); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32 bits.
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Label done; \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
+ DCHECK_EQ(kMode_MRR, mode); \
+ Register offset = operand.rb(); \
+ __ extsw(offset, offset); \
+ if (HasRegisterInput(instr, 2)) { \
+ __ cmplw(offset, i.InputRegister(2)); \
+ } else { \
+ __ cmplwi(offset, i.InputImmediate(2)); \
+ } \
+ __ bge(&done); \
+ Register value = i.InputRegister(3); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ __ bind(&done); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ if (FLAG_enable_embedded_constant_pool) {
+ __ LoadP(kConstantPoolRegister,
+ MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ }
+ __ LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ mtlr(r0);
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
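// Taken together, AssembleDeconstructActivationRecord and
// AssemblePrepareTailCall tear the caller's frame down before a tail call:
// sp moves by the stack-parameter delta, and, when a frame was built,
// lr/fp (plus the constant pool pointer under
// FLAG_enable_embedded_constant_pool) are reloaded from the frame so the
// callee returns directly to the caller's caller. Restored state, in load
// order:
//
//   [constant pool] <- [fp + kConstantPoolOffset]   (if enabled)
//   lr              <- [fp + kCallerPCOffset]
//   fp              <- [fp + kCallerFPOffset]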
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ PPCOperandConverter i(this, instr);
+ ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
+
+ switch (opcode) {
+ case kArchCallCodeObject: {
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ EnsureSpaceForLazyDeopt();
+ if (HasRegisterInput(instr, 0)) {
+ __ addi(ip, i.InputRegister(0),
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Call(ip);
+ } else {
+ __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ }
+ RecordCallPosition(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallCodeObject: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ if (HasRegisterInput(instr, 0)) {
+ __ addi(ip, i.InputRegister(0),
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+ } else {
+ // We cannot use the constant pool to load the target since
+ // we've already restored the caller's frame.
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+ __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ }
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchCallJSFunction: {
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ LoadP(kScratchReg,
+ FieldMemOperand(func, JSFunction::kContextOffset));
+ __ cmp(cp, kScratchReg);
+ __ Assert(eq, kWrongFunctionContext);
+ }
+ __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Call(ip);
+ RecordCallPosition(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ LoadP(kScratchReg,
+ FieldMemOperand(func, JSFunction::kContextOffset));
+ __ cmp(cp, kScratchReg);
+ __ Assert(eq, kWrongFunctionContext);
+ }
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Jump(ip);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchNop:
+ case kArchThrowTerminator:
+ // don't emit code for nops.
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ break;
+ }
+ case kArchRet:
+ AssembleReturn();
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchStackPointer:
+ __ mr(i.OutputRegister(), sp);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchFramePointer:
+ __ mr(i.OutputRegister(), fp);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchTruncateDoubleToI:
+ // TODO(mbrandy): move slow call to stub out of line.
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register offset = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+ scratch0, scratch1, mode);
+ __ StorePX(value, MemOperand(object, offset));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kPPC_And:
+ if (HasRegisterInput(instr, 1)) {
+ __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ } else {
+ __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ }
+ break;
+ case kPPC_AndComplement:
+ __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ break;
+ case kPPC_Or:
+ if (HasRegisterInput(instr, 1)) {
+ __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ } else {
+ __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+ break;
+ case kPPC_OrComplement:
+ __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ break;
+ case kPPC_Xor:
+ if (HasRegisterInput(instr, 1)) {
+ __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ } else {
+ __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+ break;
+ case kPPC_ShiftLeft32:
+ ASSEMBLE_BINOP_RC(slw, slwi);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ShiftLeft64:
+ ASSEMBLE_BINOP_RC(sld, sldi);
+ break;
+#endif
+ case kPPC_ShiftRight32:
+ ASSEMBLE_BINOP_RC(srw, srwi);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ShiftRight64:
+ ASSEMBLE_BINOP_RC(srd, srdi);
+ break;
+#endif
+ case kPPC_ShiftRightAlg32:
+ ASSEMBLE_BINOP_INT_RC(sraw, srawi);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ShiftRightAlg64:
+ ASSEMBLE_BINOP_INT_RC(srad, sradi);
+ break;
+#endif
+ case kPPC_RotRight32:
+ if (HasRegisterInput(instr, 1)) {
+ __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
+ __ rotlw(i.OutputRegister(), i.InputRegister(0), kScratchReg,
+ i.OutputRCBit());
+ } else {
+ int sh = i.InputInt32(1);
+ __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
+ }
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_RotRight64:
+ if (HasRegisterInput(instr, 1)) {
+ __ subfic(kScratchReg, i.InputRegister(1), Operand(64));
+ __ rotld(i.OutputRegister(), i.InputRegister(0), kScratchReg,
+ i.OutputRCBit());
+ } else {
+ int sh = i.InputInt32(1);
+ __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
+ }
+ break;
+#endif
+ case kPPC_Not:
+ __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
+ break;
+ case kPPC_RotLeftAndMask32:
+ __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+ 31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_RotLeftAndClear64:
+ __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+ 63 - i.InputInt32(2), i.OutputRCBit());
+ break;
+ case kPPC_RotLeftAndClearLeft64:
+ __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+ 63 - i.InputInt32(2), i.OutputRCBit());
+ break;
+ case kPPC_RotLeftAndClearRight64:
+ __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
+ 63 - i.InputInt32(2), i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Add:
+#if V8_TARGET_ARCH_PPC64
+ if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ ASSEMBLE_ADD_WITH_OVERFLOW();
+ } else {
+#endif
+ if (HasRegisterInput(instr, 1)) {
+ __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ } else {
+ __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+#if V8_TARGET_ARCH_PPC64
+ }
+#endif
+ break;
+ case kPPC_AddWithOverflow32:
+ ASSEMBLE_ADD_WITH_OVERFLOW32();
+ break;
+ case kPPC_AddDouble:
+ ASSEMBLE_FLOAT_BINOP_RC(fadd);
+ break;
+ case kPPC_Sub:
+#if V8_TARGET_ARCH_PPC64
+ if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ ASSEMBLE_SUB_WITH_OVERFLOW();
+ } else {
+#endif
+ if (HasRegisterInput(instr, 1)) {
+ __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ } else {
+ __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+#if V8_TARGET_ARCH_PPC64
+ }
+#endif
+ break;
+ case kPPC_SubWithOverflow32:
+ ASSEMBLE_SUB_WITH_OVERFLOW32();
+ break;
+ case kPPC_SubDouble:
+ ASSEMBLE_FLOAT_BINOP_RC(fsub);
+ break;
+ case kPPC_Mul32:
+ __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Mul64:
+ __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_MulHigh32:
+ __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ break;
+ case kPPC_MulHighU32:
+ __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputRCBit());
+ break;
+ case kPPC_MulDouble:
+ ASSEMBLE_FLOAT_BINOP_RC(fmul);
+ break;
+ case kPPC_Div32:
+ __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Div64:
+ __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_DivU32:
+ __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_DivU64:
+ __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_DivDouble:
+ ASSEMBLE_FLOAT_BINOP_RC(fdiv);
+ break;
+ case kPPC_Mod32:
+ ASSEMBLE_MODULO(divw, mullw);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Mod64:
+ ASSEMBLE_MODULO(divd, mulld);
+ break;
+#endif
+ case kPPC_ModU32:
+ ASSEMBLE_MODULO(divwu, mullw);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ModU64:
+ ASSEMBLE_MODULO(divdu, mulld);
+ break;
+#endif
+ case kPPC_ModDouble:
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ ASSEMBLE_FLOAT_MODULO();
+ break;
+ case kPPC_Neg:
+ __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
+ break;
+ case kPPC_MaxDouble:
+ ASSEMBLE_FLOAT_MAX(kScratchDoubleReg);
+ break;
+ case kPPC_MinDouble:
+ ASSEMBLE_FLOAT_MIN(kScratchDoubleReg);
+ break;
+ case kPPC_AbsDouble:
+ ASSEMBLE_FLOAT_UNOP_RC(fabs);
+ break;
+ case kPPC_SqrtDouble:
+ ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
+ break;
+ case kPPC_FloorDouble:
+ ASSEMBLE_FLOAT_UNOP_RC(frim);
+ break;
+ case kPPC_CeilDouble:
+ ASSEMBLE_FLOAT_UNOP_RC(frip);
+ break;
+ case kPPC_TruncateDouble:
+ ASSEMBLE_FLOAT_UNOP_RC(friz);
+ break;
+ case kPPC_RoundDouble:
+ ASSEMBLE_FLOAT_UNOP_RC(frin);
+ break;
+ case kPPC_NegDouble:
+ ASSEMBLE_FLOAT_UNOP_RC(fneg);
+ break;
+ case kPPC_Cntlz32:
+ __ cntlzw_(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Cntlz64:
+ __ cntlzd_(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Popcnt32:
+ __ popcntw(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Popcnt64:
+ __ popcntd(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Cmp32:
+ ASSEMBLE_COMPARE(cmpw, cmplw);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Cmp64:
+ ASSEMBLE_COMPARE(cmp, cmpl);
+ break;
+#endif
+ case kPPC_CmpDouble:
+ ASSEMBLE_FLOAT_COMPARE(fcmpu);
+ break;
+ case kPPC_Tst32:
+ if (HasRegisterInput(instr, 1)) {
+ __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
+ } else {
+ __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
+ }
+#if V8_TARGET_ARCH_PPC64
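+      // Sign-extend so CR0 (set from the full 64-bit register on PPC64)
+      // reflects the 32-bit test result.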
+ __ extsw(r0, r0, i.OutputRCBit());
+#endif
+ DCHECK_EQ(SetRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Tst64:
+ if (HasRegisterInput(instr, 1)) {
+ __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
+ } else {
+ __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
+ }
+ DCHECK_EQ(SetRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Push:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ __ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_PushFrame: {
+ int num_slots = i.InputInt32(1);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ stfdu(i.InputDoubleRegister(0),
+ MemOperand(sp, -num_slots * kPointerSize));
+ } else {
+ __ StorePU(i.InputRegister(0),
+ MemOperand(sp, -num_slots * kPointerSize));
+ }
+ break;
+ }
+ case kPPC_StoreToStackSlot: {
+ int slot = i.InputInt32(1);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ stfd(i.InputDoubleRegister(0), MemOperand(sp, slot * kPointerSize));
+ } else {
+ __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+ }
+ break;
+ }
+ case kPPC_ExtendSignWord8:
+ __ extsb(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_ExtendSignWord16:
+ __ extsh(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_ExtendSignWord32:
+ __ extsw(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Uint32ToUint64:
+      // Zero-extend from 32 to 64 bits.
+ __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Int64ToInt32:
+ __ extsw(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Int64ToFloat32:
+ __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Int64ToDouble:
+ __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Uint64ToFloat32:
+ __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Uint64ToDouble:
+ __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Int32ToDouble:
+ __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Uint32ToDouble:
+ __ ConvertUnsignedIntToDouble(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_DoubleToInt32:
+ case kPPC_DoubleToUint32:
+ case kPPC_DoubleToInt64: {
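+      // When the node has a second (success) output, clear the FPSCR VXCVI
+      // (invalid conversion) flag before converting and sample it afterwards
+      // to materialize 1 on success or 0 on failure.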
+#if V8_TARGET_ARCH_PPC64
+ bool check_conversion =
+ (opcode == kPPC_DoubleToInt64 && i.OutputCount() > 1);
+ if (check_conversion) {
+ __ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ }
+#endif
+ __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
+#if !V8_TARGET_ARCH_PPC64
+ kScratchReg,
+#endif
+ i.OutputRegister(0), kScratchDoubleReg);
+#if V8_TARGET_ARCH_PPC64
+ if (check_conversion) {
+ // Set 2nd output to zero if conversion fails.
+ CRegister cr = cr7;
+ int crbit = v8::internal::Assembler::encode_crbit(
+ cr, static_cast<CRBit>(VXCVI % CRWIDTH));
+ __ mcrfs(cr, VXCVI); // extract FPSCR field containing VXCVI into cr7
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ li(i.OutputRegister(1), Operand(1));
+ __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
+ } else {
+ __ li(i.OutputRegister(1), Operand::Zero());
+ __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
+ __ li(i.OutputRegister(1), Operand(1));
+ }
+ }
+#endif
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_DoubleToUint64: {
+ bool check_conversion = (i.OutputCount() > 1);
+ if (check_conversion) {
+ __ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ }
+ __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
+ i.OutputRegister(0), kScratchDoubleReg);
+ if (check_conversion) {
+ // Set 2nd output to zero if conversion fails.
+ CRegister cr = cr7;
+ int crbit = v8::internal::Assembler::encode_crbit(
+ cr, static_cast<CRBit>(VXCVI % CRWIDTH));
+ __ mcrfs(cr, VXCVI); // extract FPSCR field containing VXCVI into cr7
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ li(i.OutputRegister(1), Operand(1));
+ __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
+ } else {
+ __ li(i.OutputRegister(1), Operand::Zero());
+ __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
+ __ li(i.OutputRegister(1), Operand(1));
+ }
+ }
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ }
+#endif
+ case kPPC_DoubleToFloat32:
+ ASSEMBLE_FLOAT_UNOP_RC(frsp);
+ break;
+ case kPPC_Float32ToDouble:
+      // Nothing to convert: float32 values are held in double registers on
+      // PPC, so a register move suffices.
+ __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_DoubleExtractLowWord32:
+ __ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_DoubleExtractHighWord32:
+ __ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_DoubleInsertLowWord32:
+ __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_DoubleInsertHighWord32:
+ __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_DoubleConstruct:
+#if V8_TARGET_ARCH_PPC64
+ __ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
+ i.InputRegister(0), i.InputRegister(1), r0);
+#else
+ __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0),
+ i.InputRegister(1));
+#endif
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_BitcastFloat32ToInt32:
+ __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kPPC_BitcastInt32ToFloat32:
+ __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_BitcastDoubleToInt64:
+ __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kPPC_BitcastInt64ToDouble:
+ __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+#endif
+ case kPPC_LoadWordU8:
+ ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
+ break;
+ case kPPC_LoadWordS8:
+ ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
+ __ extsb(i.OutputRegister(), i.OutputRegister());
+ break;
+ case kPPC_LoadWordU16:
+ ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
+ break;
+ case kPPC_LoadWordS16:
+ ASSEMBLE_LOAD_INTEGER(lha, lhax);
+ break;
+ case kPPC_LoadWordS32:
+ ASSEMBLE_LOAD_INTEGER(lwa, lwax);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_LoadWord64:
+ ASSEMBLE_LOAD_INTEGER(ld, ldx);
+ break;
+#endif
+ case kPPC_LoadFloat32:
+ ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
+ break;
+ case kPPC_LoadDouble:
+ ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
+ break;
+ case kPPC_StoreWord8:
+ ASSEMBLE_STORE_INTEGER(stb, stbx);
+ break;
+ case kPPC_StoreWord16:
+ ASSEMBLE_STORE_INTEGER(sth, sthx);
+ break;
+ case kPPC_StoreWord32:
+ ASSEMBLE_STORE_INTEGER(stw, stwx);
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_StoreWord64:
+ ASSEMBLE_STORE_INTEGER(std, stdx);
+ break;
+#endif
+ case kPPC_StoreFloat32:
+ ASSEMBLE_STORE_FLOAT32();
+ break;
+ case kPPC_StoreDouble:
+ ASSEMBLE_STORE_DOUBLE();
+ break;
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
+ __ extsb(i.OutputRegister(), i.OutputRegister());
+ break;
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
+ break;
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
+ break;
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
+ break;
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax);
+ break;
+ case kCheckedLoadWord64:
+#if V8_TARGET_ARCH_PPC64
+ ASSEMBLE_CHECKED_LOAD_INTEGER(ld, ldx);
+#else
+ UNREACHABLE();
+#endif
+ break;
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
+ break;
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
+ break;
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
+ break;
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
+ break;
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
+ break;
+ case kCheckedStoreWord64:
+#if V8_TARGET_ARCH_PPC64
+ ASSEMBLE_CHECKED_STORE_INTEGER(std, stdx);
+#else
+ UNREACHABLE();
+#endif
+ break;
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT32();
+ break;
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_DOUBLE();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+} // NOLINT(readability/fn_size)
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ PPCOperandConverter i(this, instr);
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ ArchOpcode op = instr->arch_opcode();
+ FlagsCondition condition = branch->condition;
+ CRegister cr = cr0;
+
+ Condition cond = FlagsConditionToCondition(condition, op);
+ if (op == kPPC_CmpDouble) {
+  // Check for unordered if necessary.
+ if (cond == le) {
+ __ bunordered(flabel, cr);
+ // Unnecessary for eq/lt since only FU bit will be set.
+ } else if (cond == gt) {
+ __ bunordered(tlabel, cr);
+ // Unnecessary for ne/ge since only FU bit will be set.
+ }
+ }
+ __ b(cond, tlabel, cr);
+ if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
+}
+
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ PPCOperandConverter i(this, instr);
+ Label done;
+ ArchOpcode op = instr->arch_opcode();
+ CRegister cr = cr0;
+ int reg_value = -1;
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ DCHECK_NE(0u, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
+
+ Condition cond = FlagsConditionToCondition(condition, op);
+ if (op == kPPC_CmpDouble) {
+  // Check for unordered if necessary.
+ if (cond == le) {
+ reg_value = 0;
+ __ li(reg, Operand::Zero());
+ __ bunordered(&done, cr);
+ } else if (cond == gt) {
+ reg_value = 1;
+ __ li(reg, Operand(1));
+ __ bunordered(&done, cr);
+ }
+ // Unnecessary for eq/lt & ne/ge since only FU bit will be set.
+ }
+
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ switch (cond) {
+ case eq:
+ case lt:
+ case gt:
+ if (reg_value != 1) __ li(reg, Operand(1));
+ __ li(kScratchReg, Operand::Zero());
+ __ isel(cond, reg, reg, kScratchReg, cr);
+ break;
+ case ne:
+ case ge:
+ case le:
+ if (reg_value != 1) __ li(reg, Operand(1));
+ // r0 implies logical zero in this form
+ __ isel(NegateCondition(cond), reg, r0, reg, cr);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ if (reg_value != 0) __ li(reg, Operand::Zero());
+ __ b(NegateCondition(cond), &done, cr);
+ __ li(reg, Operand(1));
+ }
+ __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ PPCOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ Cmpi(input, Operand(i.InputInt32(index + 0)), r0);
+ __ beq(GetLabel(i.InputRpo(index + 1)));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
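+// Bounds-check the input against the case count, then index into an in-code
+// table of absolute label addresses (emitted by AssembleJumpTable) and jump.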
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ PPCOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
+ Label** cases = zone()->NewArray<Label*>(case_count);
+ for (int32_t index = 0; index < case_count; ++index) {
+ cases[index] = GetLabel(i.InputRpo(index + 2));
+ }
+ Label* const table = AddJumpTable(cases, case_count);
+ __ Cmpli(input, Operand(case_count), r0);
+ __ bge(GetLabel(i.InputRpo(1)));
+ __ mov_label_addr(kScratchReg, table);
+ __ ShiftLeftImm(r0, input, Operand(kPointerSizeLog2));
+ __ LoadPX(kScratchReg, MemOperand(kScratchReg, r0));
+ __ Jump(kScratchReg);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, bailout_type);
+ __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->IsCFunctionCall()) {
+ __ function_descriptor();
+ __ mflr(r0);
+ if (FLAG_enable_embedded_constant_pool) {
+ __ Push(r0, fp, kConstantPoolRegister);
+ // Adjust FP to point to saved FP.
+ __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ } else {
+ __ Push(r0, fp);
+ __ mr(fp, sp);
+ }
+ } else if (descriptor->IsJSFunctionCall()) {
+ __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
+ } else if (frame()->needs_frame()) {
+ if (!ABI_CALL_VIA_IP && info()->output_code_kind() == Code::WASM_FUNCTION) {
+ // TODO(mbrandy): Restrict only to the wasm wrapper case.
+ __ StubPrologue();
+ } else {
+ __ StubPrologue(ip);
+ }
+ } else {
+ frame()->SetElidedFrameSizeInSlots(0);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ if (double_saves != 0) {
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+ }
+ if (stack_shrink_slots > 0) {
+ __ Add(sp, sp, -stack_shrink_slots * kPointerSize, r0);
+ }
+
+ // Save callee-saved Double registers.
+ if (double_saves != 0) {
+ __ MultiPushDoubles(double_saves);
+ DCHECK(kNumCalleeSavedDoubles ==
+ base::bits::CountPopulation32(double_saves));
+ frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
+ (kDoubleSize / kPointerSize));
+ }
+
+ // Save callee-saved registers.
+ const RegList saves =
+ FLAG_enable_embedded_constant_pool
+ ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPush(saves);
+    // The register save area does not include the fp or the constant pool
+    // pointer.
+ const int num_saves =
+ kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
+ DCHECK(num_saves == base::bits::CountPopulation32(saves));
+ frame()->AllocateSavedCalleeRegisterSlots(num_saves);
+ }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+
+ // Restore registers.
+ const RegList saves =
+ FLAG_enable_embedded_constant_pool
+ ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore double registers.
+ const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+ if (double_saves != 0) {
+ __ MultiPopDoubles(double_saves);
+ }
+
+ if (descriptor->IsCFunctionCall()) {
+ __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
+ } else if (frame()->needs_frame()) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
+ }
+ } else {
+ __ Drop(pop_count);
+ }
+ __ Ret();
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ PPCOperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Move(g.ToRegister(destination), src);
+ } else {
+ __ StoreP(src, g.ToMemOperand(destination), r0);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ LoadP(g.ToRegister(destination), src, r0);
+ } else {
+ Register temp = kScratchReg;
+ __ LoadP(temp, src, r0);
+ __ StoreP(temp, g.ToMemOperand(destination), r0);
+ }
+ } else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ mov(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kInt64:
+ __ mov(dst, Operand(src.ToInt64()));
+ break;
+ case Constant::kFloat32:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ break;
+ case Constant::kFloat64:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ break;
+ case Constant::kExternalReference:
+ __ mov(dst, Operand(src.ToExternalReference()));
+ break;
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ int offset;
+ if (IsMaterializableFromFrame(src_object, &offset)) {
+ __ LoadP(dst, MemOperand(fp, offset));
+ } else if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ Move(dst, src_object);
+ }
+ break;
+ }
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
+ break;
+ }
+ if (destination->IsStackSlot()) {
+ __ StoreP(dst, g.ToMemOperand(destination), r0);
+ }
+ } else {
+ DoubleRegister dst = destination->IsDoubleRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg;
+ double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
+ : src.ToFloat64();
+ __ LoadDoubleLiteral(dst, value, kScratchReg);
+ if (destination->IsDoubleStackSlot()) {
+ __ StoreDouble(dst, g.ToMemOperand(destination), r0);
+ }
+ }
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DoubleRegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ StoreDouble(src, g.ToMemOperand(destination), r0);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
+ } else {
+ DoubleRegister temp = kScratchDoubleReg;
+ __ LoadDouble(temp, src, r0);
+ __ StoreDouble(temp, g.ToMemOperand(destination), r0);
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ PPCOperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ // Register-register.
+ Register temp = kScratchReg;
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mr(temp, src);
+ __ mr(src, dst);
+ __ mr(dst, temp);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ mr(temp, src);
+ __ LoadP(src, dst);
+ __ StoreP(temp, dst);
+ }
+#if V8_TARGET_ARCH_PPC64
+ } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+#else
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsStackSlot());
+#endif
+ Register temp_0 = kScratchReg;
+ Register temp_1 = r0;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ LoadP(temp_0, src);
+ __ LoadP(temp_1, dst);
+ __ StoreP(temp_0, dst);
+ __ StoreP(temp_1, src);
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister temp = kScratchDoubleReg;
+ DoubleRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DoubleRegister dst = g.ToDoubleRegister(destination);
+ __ fmr(temp, src);
+ __ fmr(src, dst);
+ __ fmr(dst, temp);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ fmr(temp, src);
+ __ lfd(src, dst);
+ __ stfd(temp, dst);
+ }
+#if !V8_TARGET_ARCH_PPC64
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleStackSlot());
+ DoubleRegister temp_0 = kScratchDoubleReg;
+ DoubleRegister temp_1 = d0;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ lfd(temp_0, src);
+ __ lfd(temp_1, dst);
+ __ stfd(temp_0, dst);
+ __ stfd(temp_1, src);
+#endif
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ for (size_t index = 0; index < target_count; ++index) {
+ __ emit_label_addr(targets[index]);
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+ // We do not insert nops for inlined Smi code.
+}
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
+ int space_needed = Deoptimizer::patch_size();
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+    // Block trampoline pool emission for the duration of the padding.
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
+ }
+ }
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
new file mode 100644
index 0000000..a3bf80e
--- /dev/null
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -0,0 +1,139 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
+#define V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// PPC-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(PPC_And) \
+ V(PPC_AndComplement) \
+ V(PPC_Or) \
+ V(PPC_OrComplement) \
+ V(PPC_Xor) \
+ V(PPC_ShiftLeft32) \
+ V(PPC_ShiftLeft64) \
+ V(PPC_ShiftRight32) \
+ V(PPC_ShiftRight64) \
+ V(PPC_ShiftRightAlg32) \
+ V(PPC_ShiftRightAlg64) \
+ V(PPC_RotRight32) \
+ V(PPC_RotRight64) \
+ V(PPC_Not) \
+ V(PPC_RotLeftAndMask32) \
+ V(PPC_RotLeftAndClear64) \
+ V(PPC_RotLeftAndClearLeft64) \
+ V(PPC_RotLeftAndClearRight64) \
+ V(PPC_Add) \
+ V(PPC_AddWithOverflow32) \
+ V(PPC_AddDouble) \
+ V(PPC_Sub) \
+ V(PPC_SubWithOverflow32) \
+ V(PPC_SubDouble) \
+ V(PPC_Mul32) \
+ V(PPC_Mul64) \
+ V(PPC_MulHigh32) \
+ V(PPC_MulHighU32) \
+ V(PPC_MulDouble) \
+ V(PPC_Div32) \
+ V(PPC_Div64) \
+ V(PPC_DivU32) \
+ V(PPC_DivU64) \
+ V(PPC_DivDouble) \
+ V(PPC_Mod32) \
+ V(PPC_Mod64) \
+ V(PPC_ModU32) \
+ V(PPC_ModU64) \
+ V(PPC_ModDouble) \
+ V(PPC_Neg) \
+ V(PPC_NegDouble) \
+ V(PPC_SqrtDouble) \
+ V(PPC_FloorDouble) \
+ V(PPC_CeilDouble) \
+ V(PPC_TruncateDouble) \
+ V(PPC_RoundDouble) \
+ V(PPC_MaxDouble) \
+ V(PPC_MinDouble) \
+ V(PPC_AbsDouble) \
+ V(PPC_Cntlz32) \
+ V(PPC_Cntlz64) \
+ V(PPC_Popcnt32) \
+ V(PPC_Popcnt64) \
+ V(PPC_Cmp32) \
+ V(PPC_Cmp64) \
+ V(PPC_CmpDouble) \
+ V(PPC_Tst32) \
+ V(PPC_Tst64) \
+ V(PPC_Push) \
+ V(PPC_PushFrame) \
+ V(PPC_StoreToStackSlot) \
+ V(PPC_ExtendSignWord8) \
+ V(PPC_ExtendSignWord16) \
+ V(PPC_ExtendSignWord32) \
+ V(PPC_Uint32ToUint64) \
+ V(PPC_Int64ToInt32) \
+ V(PPC_Int64ToFloat32) \
+ V(PPC_Int64ToDouble) \
+ V(PPC_Uint64ToFloat32) \
+ V(PPC_Uint64ToDouble) \
+ V(PPC_Int32ToDouble) \
+ V(PPC_Uint32ToDouble) \
+ V(PPC_Float32ToDouble) \
+ V(PPC_DoubleToInt32) \
+ V(PPC_DoubleToUint32) \
+ V(PPC_DoubleToInt64) \
+ V(PPC_DoubleToUint64) \
+ V(PPC_DoubleToFloat32) \
+ V(PPC_DoubleExtractLowWord32) \
+ V(PPC_DoubleExtractHighWord32) \
+ V(PPC_DoubleInsertLowWord32) \
+ V(PPC_DoubleInsertHighWord32) \
+ V(PPC_DoubleConstruct) \
+ V(PPC_BitcastInt32ToFloat32) \
+ V(PPC_BitcastFloat32ToInt32) \
+ V(PPC_BitcastInt64ToDouble) \
+ V(PPC_BitcastDoubleToInt64) \
+ V(PPC_LoadWordS8) \
+ V(PPC_LoadWordU8) \
+ V(PPC_LoadWordS16) \
+ V(PPC_LoadWordU16) \
+ V(PPC_LoadWordS32) \
+ V(PPC_LoadWord64) \
+ V(PPC_LoadFloat32) \
+ V(PPC_LoadDouble) \
+ V(PPC_StoreWord8) \
+ V(PPC_StoreWord16) \
+ V(PPC_StoreWord32) \
+ V(PPC_StoreWord64) \
+ V(PPC_StoreFloat32) \
+ V(PPC_StoreDouble)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
new file mode 100644
index 0000000..fc90cdd
--- /dev/null
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -0,0 +1,143 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kPPC_And:
+ case kPPC_AndComplement:
+ case kPPC_Or:
+ case kPPC_OrComplement:
+ case kPPC_Xor:
+ case kPPC_ShiftLeft32:
+ case kPPC_ShiftLeft64:
+ case kPPC_ShiftRight32:
+ case kPPC_ShiftRight64:
+ case kPPC_ShiftRightAlg32:
+ case kPPC_ShiftRightAlg64:
+ case kPPC_RotRight32:
+ case kPPC_RotRight64:
+ case kPPC_Not:
+ case kPPC_RotLeftAndMask32:
+ case kPPC_RotLeftAndClear64:
+ case kPPC_RotLeftAndClearLeft64:
+ case kPPC_RotLeftAndClearRight64:
+ case kPPC_Add:
+ case kPPC_AddWithOverflow32:
+ case kPPC_AddDouble:
+ case kPPC_Sub:
+ case kPPC_SubWithOverflow32:
+ case kPPC_SubDouble:
+ case kPPC_Mul32:
+ case kPPC_Mul64:
+ case kPPC_MulHigh32:
+ case kPPC_MulHighU32:
+ case kPPC_MulDouble:
+ case kPPC_Div32:
+ case kPPC_Div64:
+ case kPPC_DivU32:
+ case kPPC_DivU64:
+ case kPPC_DivDouble:
+ case kPPC_Mod32:
+ case kPPC_Mod64:
+ case kPPC_ModU32:
+ case kPPC_ModU64:
+ case kPPC_ModDouble:
+ case kPPC_Neg:
+ case kPPC_NegDouble:
+ case kPPC_SqrtDouble:
+ case kPPC_FloorDouble:
+ case kPPC_CeilDouble:
+ case kPPC_TruncateDouble:
+ case kPPC_RoundDouble:
+ case kPPC_MaxDouble:
+ case kPPC_MinDouble:
+ case kPPC_AbsDouble:
+ case kPPC_Cntlz32:
+ case kPPC_Cntlz64:
+ case kPPC_Popcnt32:
+ case kPPC_Popcnt64:
+ case kPPC_Cmp32:
+ case kPPC_Cmp64:
+ case kPPC_CmpDouble:
+ case kPPC_Tst32:
+ case kPPC_Tst64:
+ case kPPC_ExtendSignWord8:
+ case kPPC_ExtendSignWord16:
+ case kPPC_ExtendSignWord32:
+ case kPPC_Uint32ToUint64:
+ case kPPC_Int64ToInt32:
+ case kPPC_Int64ToFloat32:
+ case kPPC_Int64ToDouble:
+ case kPPC_Uint64ToFloat32:
+ case kPPC_Uint64ToDouble:
+ case kPPC_Int32ToDouble:
+ case kPPC_Uint32ToDouble:
+ case kPPC_Float32ToDouble:
+ case kPPC_DoubleToInt32:
+ case kPPC_DoubleToUint32:
+ case kPPC_DoubleToInt64:
+ case kPPC_DoubleToUint64:
+ case kPPC_DoubleToFloat32:
+ case kPPC_DoubleExtractLowWord32:
+ case kPPC_DoubleExtractHighWord32:
+ case kPPC_DoubleInsertLowWord32:
+ case kPPC_DoubleInsertHighWord32:
+ case kPPC_DoubleConstruct:
+ case kPPC_BitcastInt32ToFloat32:
+ case kPPC_BitcastFloat32ToInt32:
+ case kPPC_BitcastInt64ToDouble:
+ case kPPC_BitcastDoubleToInt64:
+ return kNoOpcodeFlags;
+
+ case kPPC_LoadWordS8:
+ case kPPC_LoadWordU8:
+ case kPPC_LoadWordS16:
+ case kPPC_LoadWordU16:
+ case kPPC_LoadWordS32:
+ case kPPC_LoadWord64:
+ case kPPC_LoadFloat32:
+ case kPPC_LoadDouble:
+ return kIsLoadOperation;
+
+ case kPPC_StoreWord8:
+ case kPPC_StoreWord16:
+ case kPPC_StoreWord32:
+ case kPPC_StoreWord64:
+ case kPPC_StoreFloat32:
+ case kPPC_StoreDouble:
+ case kPPC_Push:
+ case kPPC_PushFrame:
+ case kPPC_StoreToStackSlot:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+      // Already covered in architecture-independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
new file mode 100644
index 0000000..f6ebbdf
--- /dev/null
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -0,0 +1,1772 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/adapters.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/ppc/frames-ppc.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
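+// Classes of immediate operands the selector can fold into an instruction:
+// 16-bit signed/unsigned immediates for D-form arithmetic/logical ops, a
+// 4-byte-aligned variant for DS-form loads/stores (ld/std/lwa), and shift
+// amounts bounded by the word size.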
+enum ImmediateMode {
+ kInt16Imm,
+ kInt16Imm_Unsigned,
+ kInt16Imm_Negate,
+ kInt16Imm_4ByteAligned,
+ kShift32Imm,
+ kShift64Imm,
+ kNoImmediate
+};
+
+
+// Adds PPC-specific methods for generating operands.
+class PPCOperandGenerator final : public OperandGenerator {
+ public:
+ explicit PPCOperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
+ if (CanBeImmediate(node, mode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool CanBeImmediate(Node* node, ImmediateMode mode) {
+ int64_t value;
+ if (node->opcode() == IrOpcode::kInt32Constant)
+ value = OpParameter<int32_t>(node);
+ else if (node->opcode() == IrOpcode::kInt64Constant)
+ value = OpParameter<int64_t>(node);
+ else
+ return false;
+ return CanBeImmediate(value, mode);
+ }
+
+ bool CanBeImmediate(int64_t value, ImmediateMode mode) {
+ switch (mode) {
+ case kInt16Imm:
+ return is_int16(value);
+ case kInt16Imm_Unsigned:
+ return is_uint16(value);
+ case kInt16Imm_Negate:
+ return is_int16(-value);
+ case kInt16Imm_4ByteAligned:
+ return is_int16(value) && !(value & 3);
+ case kShift32Imm:
+ return 0 <= value && value < 32;
+ case kShift64Imm:
+ return 0 <= value && value < 64;
+ case kNoImmediate:
+ return false;
+ }
+ return false;
+ }
+};
+
+
+namespace {
+
+void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ PPCOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ PPCOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+
+void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
+ ImmediateMode operand_mode) {
+ PPCOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), operand_mode));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ PPCOperandGenerator g(selector);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ selector->Emit(opcode, output_count, outputs, 1, inputs);
+}
+#endif
+
+
+// Shared routine for multiple binary operations.
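+// The FlagsContinuation either branches on the resulting condition (adding
+// two label inputs) or materializes it as a boolean (adding a second output).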
+template <typename Matcher>
+void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, ImmediateMode operand_mode,
+ FlagsContinuation* cont) {
+ PPCOperandGenerator g(selector);
+ Matcher m(node);
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
+}
+
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
+ ImmediateMode operand_mode) {
+ FlagsContinuation cont;
+ VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
+}
+
+} // namespace
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ PPCOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* offset = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ ImmediateMode mode = kInt16Imm;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kPPC_LoadFloat32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kPPC_LoadDouble;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
+ break;
+#if !V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kTagged: // Fall through.
+#endif
+ case MachineRepresentation::kWord32:
+ opcode = kPPC_LoadWordS32;
+#if V8_TARGET_ARCH_PPC64
+ // TODO(mbrandy): this applies to signed loads only (lwa)
+ mode = kInt16Imm_4ByteAligned;
+#endif
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kPPC_LoadWord64;
+ mode = kInt16Imm_4ByteAligned;
+ break;
+#else
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
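+  // Prefer the [register + immediate] (MRI) form when either operand fits
+  // the immediate mode; otherwise use [register + register] (MRR).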
+ if (g.CanBeImmediate(offset, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
+ } else if (g.CanBeImmediate(base, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
+ }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ PPCOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* offset = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+  // TODO(ppc): This could probably be structured more cleanly.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(offset);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode = kArchNop;
+ ImmediateMode mode = kInt16Imm;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kPPC_StoreFloat32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kPPC_StoreDouble;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kPPC_StoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kPPC_StoreWord16;
+ break;
+#if !V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kTagged: // Fall through.
+#endif
+ case MachineRepresentation::kWord32:
+ opcode = kPPC_StoreWord32;
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kPPC_StoreWord64;
+ mode = kInt16Imm_4ByteAligned;
+ break;
+#else
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(offset, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
+ } else if (g.CanBeImmediate(base, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
+ }
+ }
+}
+
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
+ PPCOperandGenerator g(this);
+ Node* const base = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kCheckedLoadWord32;
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kWord64:
+ opcode = kCheckedLoadWord64;
+ break;
+#endif
+ case MachineRepresentation::kFloat32:
+ opcode = kCheckedLoadFloat32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kCheckedLoadFloat64;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+#if !V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressingMode = kMode_MRR;
+ Emit(opcode | AddressingModeField::encode(addressingMode),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
+ g.UseOperand(length, kInt16Imm_Unsigned));
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
+ PPCOperandGenerator g(this);
+ Node* const base = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ Node* const value = node->InputAt(3);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kCheckedStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kCheckedStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kCheckedStoreWord32;
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kWord64:
+ opcode = kCheckedStoreWord64;
+ break;
+#endif
+ case MachineRepresentation::kFloat32:
+ opcode = kCheckedStoreFloat32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kCheckedStoreFloat64;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+#if !V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressingMode = kMode_MRR;
+ Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(offset),
+ g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
+}
+
+
+template <typename Matcher>
+static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
+ ArchOpcode opcode, bool left_can_cover,
+ bool right_can_cover, ImmediateMode imm_mode) {
+ PPCOperandGenerator g(selector);
+
+ // Map instruction to equivalent operation with inverted right input.
+ ArchOpcode inv_opcode = opcode;
+ switch (opcode) {
+ case kPPC_And:
+ inv_opcode = kPPC_AndComplement;
+ break;
+ case kPPC_Or:
+ inv_opcode = kPPC_OrComplement;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
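+  // Xor(x, -1) is bitwise NOT, so e.g. And(y, Not(x)) becomes a single
+  // and-with-complement (andc) instruction.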
+ // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
+ if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
+ Matcher mleft(m->left().node());
+ if (mleft.right().Is(-1)) {
+ selector->Emit(inv_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m->right().node()),
+ g.UseRegister(mleft.left().node()));
+ return;
+ }
+ }
+
+ // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
+ if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
+ right_can_cover) {
+ Matcher mright(m->right().node());
+ if (mright.right().Is(-1)) {
+ // TODO(all): support shifted operand on right.
+ selector->Emit(inv_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m->left().node()),
+ g.UseRegister(mright.left().node()));
+ return;
+ }
+ }
+
+ VisitBinop<Matcher>(selector, node, opcode, imm_mode);
+}
+
+
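+// Returns true iff |value| is a single contiguous run of set bits, reporting
+// its bounds as LSB-relative bit indices. For example, 0x00FF0000 (bits
+// 16..23 set) yields *mb = 23 and *me = 16; the code generator converts these
+// to PPC's MSB-relative MB/ME fields via (31 - n).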
+static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
+ int mask_width = base::bits::CountPopulation32(value);
+ int mask_msb = base::bits::CountLeadingZeros32(value);
+ int mask_lsb = base::bits::CountTrailingZeros32(value);
+ if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
+ return false;
+ *mb = mask_lsb + mask_width - 1;
+ *me = mask_lsb;
+ return true;
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
+ int mask_width = base::bits::CountPopulation64(value);
+ int mask_msb = base::bits::CountLeadingZeros64(value);
+ int mask_lsb = base::bits::CountTrailingZeros64(value);
+ if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
+ return false;
+ *mb = mask_lsb + mask_width - 1;
+ *me = mask_lsb;
+ return true;
+}
+#endif
+
+
+// TODO(mbrandy): Absorb rotate-right into rlwinm?
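+// Worked example of the shift absorption below: (x >> 3) & 0x1FFFFFFF gives
+// mb = 28, me = 0; the Shr adjustment keeps mb = 28 and sets sh = 29, so a
+// single rlwinm(dst, x, 29, 3, 31) performs both the shift and the mask.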
+void InstructionSelector::VisitWord32And(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ int mb = 0;
+ int me = 0;
+ if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
+ int sh = 0;
+ Node* left = m.left().node();
+ if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
+ CanCover(node, left)) {
+ // Try to absorb left/right shift into rlwinm
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsInRange(0, 31)) {
+ left = mleft.left().node();
+ sh = mleft.right().Value();
+ if (m.left().IsWord32Shr()) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 31 - sh) mb = 31 - sh;
+ sh = (32 - sh) & 0x1f;
+ } else {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ }
+ }
+ }
+ if (mb >= me) {
+ Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node), g.UseRegister(left),
+ g.TempImmediate(sh), g.TempImmediate(mb), g.TempImmediate(me));
+ return;
+ }
+ }
+ VisitLogical<Int32BinopMatcher>(
+ this, node, &m, kPPC_And, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+// TODO(mbrandy): Absorb rotate-right into rldic?
+void InstructionSelector::VisitWord64And(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ int mb = 0;
+ int me = 0;
+ if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
+ int sh = 0;
+ Node* left = m.left().node();
+ if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
+ CanCover(node, left)) {
+ // Try to absorb left/right shift into rldic
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsInRange(0, 63)) {
+ left = mleft.left().node();
+ sh = mleft.right().Value();
+ if (m.left().IsWord64Shr()) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 63 - sh) mb = 63 - sh;
+ sh = (64 - sh) & 0x3f;
+ } else {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ }
+ }
+ }
+ if (mb >= me) {
+ bool match = false;
+ ArchOpcode opcode;
+ int mask;
+ if (me == 0) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearLeft64;
+ mask = mb;
+ } else if (mb == 63) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearRight64;
+ mask = me;
+ } else if (sh && me <= sh && m.left().IsWord64Shl()) {
+ match = true;
+ opcode = kPPC_RotLeftAndClear64;
+ mask = mb;
+ }
+ if (match) {
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
+ g.TempImmediate(sh), g.TempImmediate(mask));
+ return;
+ }
+ }
+ }
+ VisitLogical<Int64BinopMatcher>(
+ this, node, &m, kPPC_And, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ Int32BinopMatcher m(node);
+ VisitLogical<Int32BinopMatcher>(
+ this, node, &m, kPPC_Or, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Or(Node* node) {
+ Int64BinopMatcher m(node);
+ VisitLogical<Int64BinopMatcher>(
+ this, node, &m, kPPC_Or, CanCover(node, m.left().node()),
+ CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_Xor, kInt16Imm_Unsigned);
+ }
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Xor, kInt16Imm_Unsigned);
+ }
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+ // Try to absorb logical-and into rlwinm
+ Int32BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ if (mb >= me) {
+ Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mb), g.TempImmediate(me));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kPPC_ShiftLeft32, node, kShift32Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ // TODO(mbrandy): eliminate left sign extension if right >= 32
+ if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+ // Try to absorb logical-and into rldic
+ Int64BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (me < sh) me = sh;
+ if (mb >= me) {
+ bool match = false;
+ ArchOpcode opcode;
+ int mask;
+ if (me == 0) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearLeft64;
+ mask = mb;
+ } else if (mb == 63) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearRight64;
+ mask = me;
+ } else if (sh && me <= sh) {
+ match = true;
+ opcode = kPPC_RotLeftAndClear64;
+ mask = mb;
+ }
+ if (match) {
+ Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mask));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kPPC_ShiftLeft64, node, kShift64Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+ // Try to absorb logical-and into rlwinm
+ Int32BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 31 - sh) mb = 31 - sh;
+ sh = (32 - sh) & 0x1f;
+ if (mb >= me) {
+ Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mb), g.TempImmediate(me));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kPPC_ShiftRight32, node, kShift32Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+ // Try to absorb logical-and into rldic
+ Int64BinopMatcher mleft(m.left().node());
+ int sh = m.right().Value();
+ int mb;
+ int me;
+ if (mleft.right().HasValue() &&
+ IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+ // Adjust the mask such that it doesn't include any rotated bits.
+ if (mb > 63 - sh) mb = 63 - sh;
+ sh = (64 - sh) & 0x3f;
+ if (mb >= me) {
+ bool match = false;
+ ArchOpcode opcode;
+ int mask;
+ if (me == 0) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearLeft64;
+ mask = mb;
+ } else if (mb == 63) {
+ match = true;
+ opcode = kPPC_RotLeftAndClearRight64;
+ mask = me;
+ }
+ if (match) {
+ Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+ g.TempImmediate(mask));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kPPC_ShiftRight64, node, kShift64Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ // Replace with sign extension for (x << K) >> K where K is 16 or 24.
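+  // e.g. (x << 16) >> 16 is equivalent to sign-extending the low 16 bits of
+  // x, so a single extend-sign instruction suffices.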
+ if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(16) && m.right().Is(16)) {
+ Emit(kPPC_ExtendSignWord16, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if (mleft.right().Is(24) && m.right().Is(24)) {
+ Emit(kPPC_ExtendSignWord8, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ }
+ }
+ VisitRRO(this, kPPC_ShiftRightAlg32, node, kShift32Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ VisitRRO(this, kPPC_ShiftRightAlg64, node, kShift64Imm);
+}
+#endif
+
+
+// TODO(mbrandy): Absorb logical-and into rlwinm?
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, kPPC_RotRight32, node, kShift32Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+// TODO(mbrandy): Absorb logical-and into rldic?
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, kPPC_RotRight64, node, kShift64Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Cntlz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Cntlz64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Popcnt32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Popcnt64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+#endif
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Add(Node* node) {
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ PPCOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+ } else {
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate);
+ }
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ PPCOperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+ } else {
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate);
+ }
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ VisitRRR(this, kPPC_Mul32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ VisitRRR(this, kPPC_Mul64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_MulHigh32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_MulHighU32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitRRR(this, kPPC_Div32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Div(Node* node) {
+ VisitRRR(this, kPPC_Div64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ VisitRRR(this, kPPC_DivU32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitUint64Div(Node* node) {
+ VisitRRR(this, kPPC_DivU64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitRRR(this, kPPC_Mod32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ VisitRRR(this, kPPC_Mod64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ VisitRRR(this, kPPC_ModU32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitUint64Mod(Node* node) {
+ VisitRRR(this, kPPC_ModU64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ VisitRR(this, kPPC_Float32ToDouble, node);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ VisitRR(this, kPPC_Int32ToDouble, node);
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ VisitRR(this, kPPC_Uint32ToDouble, node);
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ VisitRR(this, kPPC_DoubleToInt32, node);
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ VisitRR(this, kPPC_DoubleToUint32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_ExtendSignWord32, node);
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_Uint32ToUint64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ VisitRR(this, kPPC_DoubleToFloat32, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kPPC_DoubleToInt32, node);
+ }
+ UNREACHABLE();
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ // TODO(mbrandy): inspect input to see if nop is appropriate.
+ VisitRR(this, kPPC_Int64ToInt32, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kPPC_Int64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kPPC_Int64ToDouble, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kPPC_Uint64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kPPC_Uint64ToDouble, node);
+}
+#endif
+
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kPPC_BitcastFloat32ToInt32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kPPC_BitcastDoubleToInt64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ VisitRR(this, kPPC_BitcastInt32ToFloat32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kPPC_BitcastInt64ToDouble, node);
+}
+#endif
+
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kPPC_AddDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ // TODO(mbrandy): detect multiply-add
+ VisitRRR(this, kPPC_AddDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ PPCOperandGenerator g(this);
+ Float32BinopMatcher m(node);
+ if (m.left().IsMinusZero()) {
+ Emit(kPPC_NegDouble, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()));
+ return;
+ }
+ VisitRRR(this, kPPC_SubDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ // TODO(mbrandy): detect multiply-subtract
+ PPCOperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsMinusZero()) {
+ if (m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ // -floor(-x) = ceil(x)
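+          // e.g. for x = 1.5: -floor(-1.5) = -(-2) = 2 = ceil(1.5).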
+ Emit(kPPC_CeilDouble, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
+ Emit(kPPC_NegDouble, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()));
+ return;
+ }
+ VisitRRR(this, kPPC_SubDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kPPC_MulDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ // TODO(mbrandy): detect negate
+ VisitRRR(this, kPPC_MulDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kPPC_DivDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRR(this, kPPC_DivDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_ModDouble, g.DefineAsFixed(node, d1),
+ g.UseFixed(node->InputAt(0), d1),
+ g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kPPC_AbsDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kPPC_AbsDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kPPC_SqrtDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRR(this, kPPC_SqrtDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kPPC_FloorDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kPPC_FloorDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kPPC_CeilDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kPPC_CeilDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kPPC_TruncateDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ VisitRR(this, kPPC_TruncateDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ VisitRR(this, kPPC_RoundDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32,
+ kInt16Imm, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32, kInt16Imm,
+ &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
+ kInt16Imm_Negate, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
+ kInt16Imm_Negate, &cont);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm, &cont);
+}
+
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate, &cont);
+}
+#endif
+
+
+static bool CompareLogical(FlagsContinuation* cont) {
+ switch (cont->condition()) {
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ return true;
+ default:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+namespace {
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ PPCOperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ }
+}
+
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative, ImmediateMode immediate_mode) {
+ PPCOperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
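+  // e.g. for 5 < x the operands are swapped and the condition commuted to
+  // x > 5 so that the constant can be encoded as a 16-bit immediate.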
+ if (g.CanBeImmediate(right, immediate_mode)) {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+ cont);
+ } else if (g.CanBeImmediate(left, immediate_mode)) {
+ if (!commutative) cont->Commute();
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+ VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+ VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode);
+}
+#endif
+
+
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ PPCOperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(left),
+ g.UseRegister(right), cont);
+}
+
+
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ PPCOperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(left),
+ g.UseRegister(right), cont);
+}
+
+
+// Shared routine for word comparisons against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+ Node* value, InstructionCode opcode,
+ FlagsContinuation* cont) {
+ while (selector->CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal: {
+ // Combine with comparisons against 0 by simply inverting the
+ // continuation.
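+        // e.g. a branch on Word32Equal(x, 0) becomes a branch on x with the
+        // continuation negated, so no boolean needs to be materialized.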
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ continue;
+ }
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(selector, value, cont);
+ }
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(selector, value, cont);
+#if V8_TARGET_ARCH_PPC64
+ case IrOpcode::kWord64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kInt64LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
+#endif
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (ProjectionIndexOf(value->op()) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either nullptr, which means there's no use of the
+          // actual value, or was already defined, which means it is scheduled
+          // *AFTER* this branch.
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || selector->IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int32BinopMatcher>(
+ selector, node, kPPC_AddWithOverflow32, kInt16Imm, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int32BinopMatcher>(selector, node,
+ kPPC_SubWithOverflow32,
+ kInt16Imm_Negate, cont);
+#if V8_TARGET_ARCH_PPC64
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add,
+ kInt16Imm, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Sub,
+ kInt16Imm_Negate, cont);
+#endif
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kInt32Sub:
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kWord32And:
+        // TODO(mbrandy): opportunity for rlwinm?
+ return VisitWordCompare(selector, value, kPPC_Tst32, cont, true,
+ kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt32Add:
+// case IrOpcode::kWord32Or:
+// case IrOpcode::kWord32Xor:
+// case IrOpcode::kWord32Sar:
+// case IrOpcode::kWord32Shl:
+// case IrOpcode::kWord32Shr:
+// case IrOpcode::kWord32Ror:
+#if V8_TARGET_ARCH_PPC64
+ case IrOpcode::kInt64Sub:
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kWord64And:
+        // TODO(mbrandy): opportunity for rldic?
+ return VisitWordCompare(selector, value, kPPC_Tst64, cont, true,
+ kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt64Add:
+// case IrOpcode::kWord64Or:
+// case IrOpcode::kWord64Xor:
+// case IrOpcode::kWord64Sar:
+// case IrOpcode::kWord64Shl:
+// case IrOpcode::kWord64Shr:
+// case IrOpcode::kWord64Ror:
+#endif
+ default:
+ break;
+ }
+ break;
+ }
+
+  // Branch could not be combined with a compare; emit a compare against 0.
+ PPCOperandGenerator g(selector);
+ VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
+ cont);
+}
+
+
+void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+ VisitWordCompareZero(selector, user, value, kPPC_Cmp32, cont);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+ VisitWordCompareZero(selector, user, value, kPPC_Cmp64, cont);
+}
+#endif
+
+} // namespace
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+ VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ PPCOperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
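+  // Illustrative: 4 cases spanning the values 0..5 give a table cost of
+  // 10 + 3 * 3 = 19 vs. a lookup cost of 11 + 3 * 4 = 23, so the table wins.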
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kPPC_Sub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
+ }
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ FlagsContinuation cont(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
+ }
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+#endif
+
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ PPCOperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = kStackFrameExtraParamSlot;
+ for (PushParameter input : (*arguments)) {
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(slot));
+ ++slot;
+ }
+ } else {
+ // Push any stack arguments.
+ int num_slots = static_cast<int>(descriptor->StackParameterCount());
+ int slot = 0;
+ for (PushParameter input : (*arguments)) {
+ if (slot == 0) {
+ DCHECK(input.node());
+ Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(num_slots));
+ } else {
+ // Skip any alignment holes in pushed nodes.
+ if (input.node()) {
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(slot));
+ }
+ }
+ ++slot;
+ }
+ }
+}
+
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_DoubleExtractLowWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_DoubleExtractHighWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ PPCOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
+ CanCover(node, left)) {
+ left = left->InputAt(1);
+ Emit(kPPC_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
+ g.UseRegister(right));
+ return;
+ }
+ Emit(kPPC_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ PPCOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
+ CanCover(node, left)) {
+ left = left->InputAt(1);
+ Emit(kPPC_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
+ g.UseRegister(left));
+ return;
+ }
+ Emit(kPPC_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ return MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesAway |
+ MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord64Popcnt;
+ // We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
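+  // e.g. a 32-bit shift by 32 yields 0 on PPC rather than acting like a
+  // shift by 0, so shift amounts cannot be assumed to wrap at 32.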
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index b93ec66..4df2bde 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -2,38 +2,37 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/code-factory.h"
-#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
+
+#include "src/code-factory.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/pipeline.h"
#include "src/compiler/scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
-RawMachineAssembler::RawMachineAssembler(Graph* graph,
- MachineSignature* machine_sig,
- MachineType word,
+RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
+ CallDescriptor* call_descriptor,
+ MachineRepresentation word,
MachineOperatorBuilder::Flags flags)
- : GraphBuilder(graph),
+ : isolate_(isolate),
+ graph_(graph),
schedule_(new (zone()) Schedule(zone())),
machine_(zone(), word, flags),
common_(zone()),
- machine_sig_(machine_sig),
- call_descriptor_(
- Linkage::GetSimplifiedCDescriptor(graph->zone(), machine_sig)),
- parameters_(NULL),
- exit_label_(schedule()->end()),
+ call_descriptor_(call_descriptor),
+ parameters_(parameter_count(), zone()),
current_block_(schedule()->start()) {
int param_count = static_cast<int>(parameter_count());
- Node* s = graph->NewNode(common_.Start(param_count));
- graph->SetStart(s);
- if (parameter_count() == 0) return;
- parameters_ = zone()->NewArray<Node*>(param_count);
+ // Add an extra input for the JSFunction parameter to the start node.
+ graph->SetStart(graph->NewNode(common_.Start(param_count + 1)));
for (size_t i = 0; i < parameter_count(); ++i) {
parameters_[i] =
- NewNode(common()->Parameter(static_cast<int>(i)), graph->start());
+ AddNode(common()->Parameter(static_cast<int>(i)), graph->start());
}
+ graph->SetEnd(graph->NewNode(common_.End(0)));
}
@@ -41,9 +40,9 @@
// Compute the correct codegen order.
DCHECK(schedule_->rpo_order()->empty());
Scheduler::ComputeSpecialRPO(zone(), schedule_);
- // Invalidate MachineAssembler.
+ // Invalidate RawMachineAssembler.
Schedule* schedule = schedule_;
- schedule_ = NULL;
+ schedule_ = nullptr;
return schedule;
}
@@ -54,94 +53,298 @@
}
-RawMachineAssembler::Label* RawMachineAssembler::Exit() {
- exit_label_.used_ = true;
- return &exit_label_;
-}
-
-
-void RawMachineAssembler::Goto(Label* label) {
+void RawMachineAssembler::Goto(RawMachineLabel* label) {
DCHECK(current_block_ != schedule()->end());
schedule()->AddGoto(CurrentBlock(), Use(label));
- current_block_ = NULL;
+ current_block_ = nullptr;
}
-void RawMachineAssembler::Branch(Node* condition, Label* true_val,
- Label* false_val) {
+void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
+ RawMachineLabel* false_val) {
DCHECK(current_block_ != schedule()->end());
- Node* branch = NewNode(common()->Branch(), condition);
+ Node* branch = AddNode(common()->Branch(), condition);
schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
- current_block_ = NULL;
+ current_block_ = nullptr;
+}
+
+
+void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
+ int32_t* case_values,
+ RawMachineLabel** case_labels,
+ size_t case_count) {
+ DCHECK_NE(schedule()->end(), current_block_);
+ size_t succ_count = case_count + 1;
+ Node* switch_node = AddNode(common()->Switch(succ_count), index);
+ BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
+  for (size_t i = 0; i < case_count; ++i) {
+    int32_t case_value = case_values[i];
+    BasicBlock* case_block = Use(case_labels[i]);
+    Node* case_node =
+        graph()->NewNode(common()->IfValue(case_value), switch_node);
+    schedule()->AddNode(case_block, case_node);
+    succ_blocks[i] = case_block;
+ }
+ BasicBlock* default_block = Use(default_label);
+ Node* default_node = graph()->NewNode(common()->IfDefault(), switch_node);
+ schedule()->AddNode(default_block, default_node);
+ succ_blocks[case_count] = default_block;
+ schedule()->AddSwitch(CurrentBlock(), switch_node, succ_blocks, succ_count);
+ current_block_ = nullptr;
}
void RawMachineAssembler::Return(Node* value) {
- schedule()->AddReturn(CurrentBlock(), value);
- current_block_ = NULL;
+ Node* ret = MakeNode(common()->Return(), 1, &value);
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
}
-Node* RawMachineAssembler::CallFunctionStub0(Node* function, Node* receiver,
- Node* context, Node* frame_state,
- CallFunctionFlags flags) {
- Callable callable = CodeFactory::CallFunction(isolate(), 0, flags);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- callable.descriptor(), 1, CallDescriptor::kNeedsFrameState,
- Operator::kNoProperties, zone());
- Node* stub_code = HeapConstant(callable.code());
- Node* call = graph()->NewNode(common()->Call(desc), stub_code, function,
- receiver, context, frame_state);
- schedule()->AddNode(CurrentBlock(), call);
- return call;
+void RawMachineAssembler::Return(Node* v1, Node* v2) {
+ Node* values[] = {v1, v2};
+ Node* ret = MakeNode(common()->Return(2), 2, values);
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
}
-Node* RawMachineAssembler::CallJS0(Node* function, Node* receiver,
- Node* context, Node* frame_state) {
- CallDescriptor* descriptor =
- Linkage::GetJSCallDescriptor(1, zone(), CallDescriptor::kNeedsFrameState);
- Node* call = graph()->NewNode(common()->Call(descriptor), function, receiver,
- context, frame_state);
- schedule()->AddNode(CurrentBlock(), call);
- return call;
+void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
+ Node* values[] = {v1, v2, v3};
+ Node* ret = MakeNode(common()->Return(3), 3, values);
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
+
+Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
+ Node** args) {
+ int param_count =
+ static_cast<int>(desc->GetMachineSignature()->parameter_count());
+ int input_count = param_count + 1;
+ Node** buffer = zone()->NewArray<Node*>(input_count);
+ int index = 0;
+ buffer[index++] = function;
+ for (int i = 0; i < param_count; i++) {
+ buffer[index++] = args[i];
+ }
+ return AddNode(common()->Call(desc), input_count, buffer);
+}
+
+
+Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
+ Node* function, Node** args,
+ Node* frame_state) {
+ DCHECK(desc->NeedsFrameState());
+ int param_count =
+ static_cast<int>(desc->GetMachineSignature()->parameter_count());
+ int input_count = param_count + 2;
+ Node** buffer = zone()->NewArray<Node*>(input_count);
+ int index = 0;
+ buffer[index++] = function;
+ for (int i = 0; i < param_count; i++) {
+ buffer[index++] = args[i];
+ }
+ buffer[index++] = frame_state;
+ return AddNode(common()->Call(desc), input_count, buffer);
}
Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
- Node* arg0, Node* context,
- Node* frame_state) {
+ Node* arg1, Node* context) {
CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- function, 1, Operator::kNoProperties, zone());
+ zone(), function, 1, Operator::kNoProperties, CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(descriptor->ReturnCount());
- Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
- Node* ref = NewNode(
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
common()->ExternalConstant(ExternalReference(function, isolate())));
Node* arity = Int32Constant(1);
- Node* call = graph()->NewNode(common()->Call(descriptor), centry, arg0, ref,
- arity, context, frame_state);
- schedule()->AddNode(CurrentBlock(), call);
- return call;
+ return AddNode(common()->Call(descriptor), centry, arg1, ref, arity, context);
}
-void RawMachineAssembler::Bind(Label* label) {
- DCHECK(current_block_ == NULL);
+Node* RawMachineAssembler::CallRuntime2(Runtime::FunctionId function,
+ Node* arg1, Node* arg2, Node* context) {
+ CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, 2, Operator::kNoProperties, CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(descriptor->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(2);
+
+ return AddNode(common()->Call(descriptor), centry, arg1, arg2, ref, arity,
+ context);
+}
+
+
+Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, Node* context) {
+ CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, 4, Operator::kNoProperties, CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(descriptor->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(4);
+
+ return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
+ ref, arity, context);
+}
+
+
+Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
+ Node** args) {
+ int param_count =
+ static_cast<int>(desc->GetMachineSignature()->parameter_count());
+ int input_count = param_count + 1;
+ Node** buffer = zone()->NewArray<Node*>(input_count);
+ int index = 0;
+ buffer[index++] = function;
+ for (int i = 0; i < param_count; i++) {
+ buffer[index++] = args[i];
+ }
+ Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
+ NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
+}
+
+
+Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
+ Node* arg1, Node* context) {
+ const int kArity = 1;
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, kArity, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(kArity);
+
+ Node* nodes[] = {centry, arg1, ref, arity, context};
+ Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+ NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
+}
+
+
+Node* RawMachineAssembler::TailCallRuntime2(Runtime::FunctionId function,
+ Node* arg1, Node* arg2,
+ Node* context) {
+ const int kArity = 2;
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, kArity, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(kArity);
+
+ Node* nodes[] = {centry, arg1, arg2, ref, arity, context};
+ Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+ NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
+}
+
+
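+// The CallCFunctionN helpers below build a simplified C call descriptor from
+// the given machine types. Illustrative use, where fn, a and b are nodes and
+// fn would typically come from ExternalConstant:
+//
+//   Node* sum = m.CallCFunction2(MachineType::Int32(), MachineType::Int32(),
+//                                MachineType::Int32(), fn, a, b);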
+Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
+ Node* function) {
+ MachineSignature::Builder builder(zone(), 1, 0);
+ builder.AddReturn(return_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ return AddNode(common()->Call(descriptor), function);
+}
+
+
+Node* RawMachineAssembler::CallCFunction1(MachineType return_type,
+ MachineType arg0_type, Node* function,
+ Node* arg0) {
+ MachineSignature::Builder builder(zone(), 1, 1);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ return AddNode(common()->Call(descriptor), function, arg0);
+}
+
+
+Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
+ MachineType arg0_type,
+ MachineType arg1_type, Node* function,
+ Node* arg0, Node* arg1) {
+ MachineSignature::Builder builder(zone(), 1, 2);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ return AddNode(common()->Call(descriptor), function, arg0, arg1);
+}
+
+
+Node* RawMachineAssembler::CallCFunction8(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type, MachineType arg7_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, Node* arg6, Node* arg7) {
+ MachineSignature::Builder builder(zone(), 1, 8);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ builder.AddParam(arg2_type);
+ builder.AddParam(arg3_type);
+ builder.AddParam(arg4_type);
+ builder.AddParam(arg5_type);
+ builder.AddParam(arg6_type);
+ builder.AddParam(arg7_type);
+ Node* args[] = {function, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+ return AddNode(common()->Call(descriptor), arraysize(args), args);
+}
+
+
+void RawMachineAssembler::Bind(RawMachineLabel* label) {
+ DCHECK(current_block_ == nullptr);
DCHECK(!label->bound_);
label->bound_ = true;
current_block_ = EnsureBlock(label);
}
-BasicBlock* RawMachineAssembler::Use(Label* label) {
+BasicBlock* RawMachineAssembler::Use(RawMachineLabel* label) {
label->used_ = true;
return EnsureBlock(label);
}
-BasicBlock* RawMachineAssembler::EnsureBlock(Label* label) {
- if (label->block_ == NULL) label->block_ = schedule()->NewBasicBlock();
+BasicBlock* RawMachineAssembler::EnsureBlock(RawMachineLabel* label) {
+ if (label->block_ == nullptr) label->block_ = schedule()->NewBasicBlock();
return label->block_;
}
@@ -152,17 +355,30 @@
}
-Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
- Node** inputs, bool incomplete) {
- DCHECK(ScheduleValid());
- DCHECK(current_block_ != NULL);
- Node* node = graph()->NewNode(op, input_count, inputs, incomplete);
- BasicBlock* block = op->opcode() == IrOpcode::kParameter ? schedule()->start()
- : CurrentBlock();
- schedule()->AddNode(block, node);
+Node* RawMachineAssembler::AddNode(const Operator* op, int input_count,
+ Node** inputs) {
+ DCHECK_NOT_NULL(schedule_);
+ DCHECK_NOT_NULL(current_block_);
+ Node* node = MakeNode(op, input_count, inputs);
+ schedule()->AddNode(CurrentBlock(), node);
return node;
}
+
+Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
+ Node** inputs) {
+ // The raw machine assembler nodes do not have effect and control inputs,
+ // so we disable checking input counts here.
+ return graph()->NewNodeUnchecked(op, input_count, inputs);
+}
+
+
+RawMachineLabel::RawMachineLabel()
+ : block_(nullptr), used_(false), bound_(false) {}
+
+
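+// A label that was jumped to (used) must have been bound to a basic block by
+// the time it is destroyed; an unused label may be left unbound.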
+RawMachineLabel::~RawMachineLabel() { DCHECK(bound_ || !used_); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index 5455814..5c232ed 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -5,63 +5,66 @@
#ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
-#include "src/v8.h"
-
+#include "src/assembler.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-builder.h"
+#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
-
+#include "src/factory.h"
namespace v8 {
namespace internal {
namespace compiler {
class BasicBlock;
+class RawMachineLabel;
class Schedule;
-class RawMachineAssembler : public GraphBuilder {
+// The RawMachineAssembler produces a low-level IR graph. All nodes are wired
+// into a graph and also placed into a schedule immediately, hence subsequent
+// code generation can happen without the need for scheduling.
+//
+// In order to create a schedule on-the-fly, the assembler keeps track of basic
+// blocks: one current basic block is being populated, while all other basic
+// blocks are referenced through labels.
+//
+// Also note that the generated graph is only valid together with the generated
+// schedule; using one without the other is invalid, as the graph is inherently
+// non-schedulable due to missing control and effect dependencies.
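+//
+// A minimal usage sketch (illustrative; graph and call-descriptor setup are
+// elided):
+//
+//   RawMachineAssembler m(isolate, graph, call_descriptor);
+//   m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(1)));
+//   Schedule* schedule = m.Export();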
+class RawMachineAssembler {
public:
- class Label {
- public:
- Label() : block_(NULL), used_(false), bound_(false) {}
- ~Label() { DCHECK(bound_ || !used_); }
+ RawMachineAssembler(
+ Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
+ MachineRepresentation word = MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::Flag::kNoFlags);
+ ~RawMachineAssembler() {}
- BasicBlock* block() { return block_; }
-
- private:
- // Private constructor for exit label.
- explicit Label(BasicBlock* block)
- : block_(block), used_(false), bound_(false) {}
-
- BasicBlock* block_;
- bool used_;
- bool bound_;
- friend class RawMachineAssembler;
- DISALLOW_COPY_AND_ASSIGN(Label);
- };
-
- RawMachineAssembler(Graph* graph, MachineSignature* machine_sig,
- MachineType word = kMachPtr,
- MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::Flag::kNoFlags);
- ~RawMachineAssembler() OVERRIDE {}
-
- Isolate* isolate() const { return zone()->isolate(); }
+ Isolate* isolate() const { return isolate_; }
+ Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
MachineOperatorBuilder* machine() { return &machine_; }
CommonOperatorBuilder* common() { return &common_; }
CallDescriptor* call_descriptor() const { return call_descriptor_; }
- size_t parameter_count() const { return machine_sig_->parameter_count(); }
- MachineSignature* machine_sig() const { return machine_sig_; }
+
+ // Finalizes the schedule and exports it to be used for code generation. Note
+ // that this RawMachineAssembler becomes invalid after export.
+ Schedule* Export();
+
+ // ===========================================================================
+ // The following utility methods create new nodes with specific operators and
+ // place them into the current basic block. They don't perform control flow,
+ // hence will not switch the current basic block.
+
+ Node* NullConstant() {
+ return HeapConstant(isolate()->factory()->null_value());
+ }
Node* UndefinedConstant() {
- Unique<HeapObject> unique = Unique<HeapObject>::CreateImmovable(
- isolate()->factory()->undefined_value());
- return NewNode(common()->HeapConstant(unique));
+ return HeapConstant(isolate()->factory()->undefined_value());
}
// Constants.
@@ -74,66 +77,74 @@
: Int32Constant(static_cast<int>(value));
}
Node* Int32Constant(int32_t value) {
- return NewNode(common()->Int32Constant(value));
+ return AddNode(common()->Int32Constant(value));
}
Node* Int64Constant(int64_t value) {
- return NewNode(common()->Int64Constant(value));
+ return AddNode(common()->Int64Constant(value));
}
Node* NumberConstant(double value) {
- return NewNode(common()->NumberConstant(value));
+ return AddNode(common()->NumberConstant(value));
}
Node* Float32Constant(float value) {
- return NewNode(common()->Float32Constant(value));
+ return AddNode(common()->Float32Constant(value));
}
Node* Float64Constant(double value) {
- return NewNode(common()->Float64Constant(value));
+ return AddNode(common()->Float64Constant(value));
}
Node* HeapConstant(Handle<HeapObject> object) {
- Unique<HeapObject> val = Unique<HeapObject>::CreateUninitialized(object);
- return NewNode(common()->HeapConstant(val));
+ return AddNode(common()->HeapConstant(object));
+ }
+ Node* BooleanConstant(bool value) {
+ Handle<Object> object = isolate()->factory()->ToBoolean(value);
+ return HeapConstant(Handle<HeapObject>::cast(object));
+ }
+ Node* ExternalConstant(ExternalReference address) {
+ return AddNode(common()->ExternalConstant(address));
}
Node* Projection(int index, Node* a) {
- return NewNode(common()->Projection(index), a);
+ return AddNode(common()->Projection(index), a);
}
// Memory Operations.
Node* Load(MachineType rep, Node* base) {
- return Load(rep, base, Int32Constant(0));
+ return Load(rep, base, IntPtrConstant(0));
}
Node* Load(MachineType rep, Node* base, Node* index) {
- return NewNode(machine()->Load(rep), base, index, graph()->start(),
- graph()->start());
+ return AddNode(machine()->Load(rep), base, index);
}
- void Store(MachineType rep, Node* base, Node* value) {
- Store(rep, base, Int32Constant(0), value);
+ Node* Store(MachineRepresentation rep, Node* base, Node* value,
+ WriteBarrierKind write_barrier) {
+ return Store(rep, base, IntPtrConstant(0), value, write_barrier);
}
- void Store(MachineType rep, Node* base, Node* index, Node* value) {
- NewNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)), base,
- index, value, graph()->start(), graph()->start());
+ Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value,
+ WriteBarrierKind write_barrier) {
+ return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)),
+ base, index, value);
}
+
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
- return NewNode(machine()->WordAnd(), a, b);
+ return AddNode(machine()->WordAnd(), a, b);
}
- Node* WordOr(Node* a, Node* b) { return NewNode(machine()->WordOr(), a, b); }
+ Node* WordOr(Node* a, Node* b) { return AddNode(machine()->WordOr(), a, b); }
Node* WordXor(Node* a, Node* b) {
- return NewNode(machine()->WordXor(), a, b);
+ return AddNode(machine()->WordXor(), a, b);
}
Node* WordShl(Node* a, Node* b) {
- return NewNode(machine()->WordShl(), a, b);
+ return AddNode(machine()->WordShl(), a, b);
}
Node* WordShr(Node* a, Node* b) {
- return NewNode(machine()->WordShr(), a, b);
+ return AddNode(machine()->WordShr(), a, b);
}
Node* WordSar(Node* a, Node* b) {
- return NewNode(machine()->WordSar(), a, b);
+ return AddNode(machine()->WordSar(), a, b);
}
Node* WordRor(Node* a, Node* b) {
- return NewNode(machine()->WordRor(), a, b);
+ return AddNode(machine()->WordRor(), a, b);
}
Node* WordEqual(Node* a, Node* b) {
- return NewNode(machine()->WordEqual(), a, b);
+ return AddNode(machine()->WordEqual(), a, b);
}
Node* WordNotEqual(Node* a, Node* b) {
return WordBinaryNot(WordEqual(a, b));
@@ -154,28 +165,29 @@
}
Node* Word32And(Node* a, Node* b) {
- return NewNode(machine()->Word32And(), a, b);
+ return AddNode(machine()->Word32And(), a, b);
}
Node* Word32Or(Node* a, Node* b) {
- return NewNode(machine()->Word32Or(), a, b);
+ return AddNode(machine()->Word32Or(), a, b);
}
Node* Word32Xor(Node* a, Node* b) {
- return NewNode(machine()->Word32Xor(), a, b);
+ return AddNode(machine()->Word32Xor(), a, b);
}
Node* Word32Shl(Node* a, Node* b) {
- return NewNode(machine()->Word32Shl(), a, b);
+ return AddNode(machine()->Word32Shl(), a, b);
}
Node* Word32Shr(Node* a, Node* b) {
- return NewNode(machine()->Word32Shr(), a, b);
+ return AddNode(machine()->Word32Shr(), a, b);
}
Node* Word32Sar(Node* a, Node* b) {
- return NewNode(machine()->Word32Sar(), a, b);
+ return AddNode(machine()->Word32Sar(), a, b);
}
Node* Word32Ror(Node* a, Node* b) {
- return NewNode(machine()->Word32Ror(), a, b);
+ return AddNode(machine()->Word32Ror(), a, b);
}
+ Node* Word32Clz(Node* a) { return AddNode(machine()->Word32Clz(), a); }
Node* Word32Equal(Node* a, Node* b) {
- return NewNode(machine()->Word32Equal(), a, b);
+ return AddNode(machine()->Word32Equal(), a, b);
}
Node* Word32NotEqual(Node* a, Node* b) {
return Word32BinaryNot(Word32Equal(a, b));
@@ -184,28 +196,29 @@
Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }
Node* Word64And(Node* a, Node* b) {
- return NewNode(machine()->Word64And(), a, b);
+ return AddNode(machine()->Word64And(), a, b);
}
Node* Word64Or(Node* a, Node* b) {
- return NewNode(machine()->Word64Or(), a, b);
+ return AddNode(machine()->Word64Or(), a, b);
}
Node* Word64Xor(Node* a, Node* b) {
- return NewNode(machine()->Word64Xor(), a, b);
+ return AddNode(machine()->Word64Xor(), a, b);
}
Node* Word64Shl(Node* a, Node* b) {
- return NewNode(machine()->Word64Shl(), a, b);
+ return AddNode(machine()->Word64Shl(), a, b);
}
Node* Word64Shr(Node* a, Node* b) {
- return NewNode(machine()->Word64Shr(), a, b);
+ return AddNode(machine()->Word64Shr(), a, b);
}
Node* Word64Sar(Node* a, Node* b) {
- return NewNode(machine()->Word64Sar(), a, b);
+ return AddNode(machine()->Word64Sar(), a, b);
}
Node* Word64Ror(Node* a, Node* b) {
- return NewNode(machine()->Word64Ror(), a, b);
+ return AddNode(machine()->Word64Ror(), a, b);
}
+ Node* Word64Clz(Node* a) { return AddNode(machine()->Word64Clz(), a); }
Node* Word64Equal(Node* a, Node* b) {
- return NewNode(machine()->Word64Equal(), a, b);
+ return AddNode(machine()->Word64Equal(), a, b);
}
Node* Word64NotEqual(Node* a, Node* b) {
return Word64BinaryNot(Word64Equal(a, b));
@@ -214,49 +227,49 @@
Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); }
Node* Int32Add(Node* a, Node* b) {
- return NewNode(machine()->Int32Add(), a, b);
+ return AddNode(machine()->Int32Add(), a, b);
}
Node* Int32AddWithOverflow(Node* a, Node* b) {
- return NewNode(machine()->Int32AddWithOverflow(), a, b);
+ return AddNode(machine()->Int32AddWithOverflow(), a, b);
}
Node* Int32Sub(Node* a, Node* b) {
- return NewNode(machine()->Int32Sub(), a, b);
+ return AddNode(machine()->Int32Sub(), a, b);
}
Node* Int32SubWithOverflow(Node* a, Node* b) {
- return NewNode(machine()->Int32SubWithOverflow(), a, b);
+ return AddNode(machine()->Int32SubWithOverflow(), a, b);
}
Node* Int32Mul(Node* a, Node* b) {
- return NewNode(machine()->Int32Mul(), a, b);
+ return AddNode(machine()->Int32Mul(), a, b);
}
Node* Int32MulHigh(Node* a, Node* b) {
- return NewNode(machine()->Int32MulHigh(), a, b);
+ return AddNode(machine()->Int32MulHigh(), a, b);
}
Node* Int32Div(Node* a, Node* b) {
- return NewNode(machine()->Int32Div(), a, b, graph()->start());
+ return AddNode(machine()->Int32Div(), a, b);
}
Node* Int32Mod(Node* a, Node* b) {
- return NewNode(machine()->Int32Mod(), a, b, graph()->start());
+ return AddNode(machine()->Int32Mod(), a, b);
}
Node* Int32LessThan(Node* a, Node* b) {
- return NewNode(machine()->Int32LessThan(), a, b);
+ return AddNode(machine()->Int32LessThan(), a, b);
}
Node* Int32LessThanOrEqual(Node* a, Node* b) {
- return NewNode(machine()->Int32LessThanOrEqual(), a, b);
+ return AddNode(machine()->Int32LessThanOrEqual(), a, b);
}
Node* Uint32Div(Node* a, Node* b) {
- return NewNode(machine()->Uint32Div(), a, b, graph()->start());
+ return AddNode(machine()->Uint32Div(), a, b);
}
Node* Uint32LessThan(Node* a, Node* b) {
- return NewNode(machine()->Uint32LessThan(), a, b);
+ return AddNode(machine()->Uint32LessThan(), a, b);
}
Node* Uint32LessThanOrEqual(Node* a, Node* b) {
- return NewNode(machine()->Uint32LessThanOrEqual(), a, b);
+ return AddNode(machine()->Uint32LessThanOrEqual(), a, b);
}
Node* Uint32Mod(Node* a, Node* b) {
- return NewNode(machine()->Uint32Mod(), a, b, graph()->start());
+ return AddNode(machine()->Uint32Mod(), a, b);
}
Node* Uint32MulHigh(Node* a, Node* b) {
- return NewNode(machine()->Uint32MulHigh(), a, b);
+ return AddNode(machine()->Uint32MulHigh(), a, b);
}
Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
@@ -265,45 +278,48 @@
Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
Node* Int64Add(Node* a, Node* b) {
- return NewNode(machine()->Int64Add(), a, b);
+ return AddNode(machine()->Int64Add(), a, b);
+ }
+ Node* Int64AddWithOverflow(Node* a, Node* b) {
+ return AddNode(machine()->Int64AddWithOverflow(), a, b);
}
Node* Int64Sub(Node* a, Node* b) {
- return NewNode(machine()->Int64Sub(), a, b);
+ return AddNode(machine()->Int64Sub(), a, b);
+ }
+ Node* Int64SubWithOverflow(Node* a, Node* b) {
+ return AddNode(machine()->Int64SubWithOverflow(), a, b);
}
Node* Int64Mul(Node* a, Node* b) {
- return NewNode(machine()->Int64Mul(), a, b);
+ return AddNode(machine()->Int64Mul(), a, b);
}
Node* Int64Div(Node* a, Node* b) {
- return NewNode(machine()->Int64Div(), a, b);
+ return AddNode(machine()->Int64Div(), a, b);
}
Node* Int64Mod(Node* a, Node* b) {
- return NewNode(machine()->Int64Mod(), a, b);
+ return AddNode(machine()->Int64Mod(), a, b);
}
Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
Node* Int64LessThan(Node* a, Node* b) {
- return NewNode(machine()->Int64LessThan(), a, b);
+ return AddNode(machine()->Int64LessThan(), a, b);
}
Node* Int64LessThanOrEqual(Node* a, Node* b) {
- return NewNode(machine()->Int64LessThanOrEqual(), a, b);
+ return AddNode(machine()->Int64LessThanOrEqual(), a, b);
+ }
+ Node* Uint64LessThan(Node* a, Node* b) {
+ return AddNode(machine()->Uint64LessThan(), a, b);
+ }
+ Node* Uint64LessThanOrEqual(Node* a, Node* b) {
+ return AddNode(machine()->Uint64LessThanOrEqual(), a, b);
}
Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
return Int64LessThanOrEqual(b, a);
}
Node* Uint64Div(Node* a, Node* b) {
- return NewNode(machine()->Uint64Div(), a, b);
+ return AddNode(machine()->Uint64Div(), a, b);
}
Node* Uint64Mod(Node* a, Node* b) {
- return NewNode(machine()->Uint64Mod(), a, b);
- }
-
- // TODO(turbofan): What is this used for?
- Node* ConvertIntPtrToInt32(Node* a) {
- return kPointerSize == 8 ? NewNode(machine()->TruncateInt64ToInt32(), a)
- : a;
- }
- Node* ConvertInt32ToIntPtr(Node* a) {
- return kPointerSize == 8 ? NewNode(machine()->ChangeInt32ToInt64(), a) : a;
+ return AddNode(machine()->Uint64Mod(), a, b);
}
#define INTPTR_BINOP(prefix, name) \
@@ -323,32 +339,77 @@
#undef INTPTR_BINOP
+ Node* Float32Add(Node* a, Node* b) {
+ return AddNode(machine()->Float32Add(), a, b);
+ }
+ Node* Float32Sub(Node* a, Node* b) {
+ return AddNode(machine()->Float32Sub(), a, b);
+ }
+ Node* Float32Mul(Node* a, Node* b) {
+ return AddNode(machine()->Float32Mul(), a, b);
+ }
+ Node* Float32Div(Node* a, Node* b) {
+ return AddNode(machine()->Float32Div(), a, b);
+ }
+ Node* Float32Max(Node* a, Node* b) {
+ return AddNode(machine()->Float32Max().op(), a, b);
+ }
+ Node* Float32Min(Node* a, Node* b) {
+ return AddNode(machine()->Float32Min().op(), a, b);
+ }
+ Node* Float32Abs(Node* a) { return AddNode(machine()->Float32Abs(), a); }
+ Node* Float32Sqrt(Node* a) { return AddNode(machine()->Float32Sqrt(), a); }
+ Node* Float32Equal(Node* a, Node* b) {
+ return AddNode(machine()->Float32Equal(), a, b);
+ }
+ Node* Float32NotEqual(Node* a, Node* b) {
+ return WordBinaryNot(Float32Equal(a, b));
+ }
+ Node* Float32LessThan(Node* a, Node* b) {
+ return AddNode(machine()->Float32LessThan(), a, b);
+ }
+ Node* Float32LessThanOrEqual(Node* a, Node* b) {
+ return AddNode(machine()->Float32LessThanOrEqual(), a, b);
+ }
+ Node* Float32GreaterThan(Node* a, Node* b) { return Float32LessThan(b, a); }
+ Node* Float32GreaterThanOrEqual(Node* a, Node* b) {
+ return Float32LessThanOrEqual(b, a);
+ }
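+ // The "not equal" and "greater than" forms above are derived rather than
+ // primitive machine operators: NotEqual negates Equal via WordBinaryNot,
+ // and GreaterThan(a, b) is just LessThan with swapped operands. An
+ // illustrative expansion (sketch, not part of the operator set):
+ //
+ //   Float32GreaterThan(a, b)
+ //       == Float32LessThan(b, a)
+ //       == AddNode(machine()->Float32LessThan(), b, a)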
+
Node* Float64Add(Node* a, Node* b) {
- return NewNode(machine()->Float64Add(), a, b);
+ return AddNode(machine()->Float64Add(), a, b);
}
Node* Float64Sub(Node* a, Node* b) {
- return NewNode(machine()->Float64Sub(), a, b);
+ return AddNode(machine()->Float64Sub(), a, b);
}
Node* Float64Mul(Node* a, Node* b) {
- return NewNode(machine()->Float64Mul(), a, b);
+ return AddNode(machine()->Float64Mul(), a, b);
}
Node* Float64Div(Node* a, Node* b) {
- return NewNode(machine()->Float64Div(), a, b);
+ return AddNode(machine()->Float64Div(), a, b);
}
Node* Float64Mod(Node* a, Node* b) {
- return NewNode(machine()->Float64Mod(), a, b);
+ return AddNode(machine()->Float64Mod(), a, b);
}
+ Node* Float64Max(Node* a, Node* b) {
+ return AddNode(machine()->Float64Max().op(), a, b);
+ }
+ Node* Float64Min(Node* a, Node* b) {
+ return AddNode(machine()->Float64Min().op(), a, b);
+ }
+ Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
+ Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
Node* Float64Equal(Node* a, Node* b) {
- return NewNode(machine()->Float64Equal(), a, b);
+ return AddNode(machine()->Float64Equal(), a, b);
}
Node* Float64NotEqual(Node* a, Node* b) {
return WordBinaryNot(Float64Equal(a, b));
}
Node* Float64LessThan(Node* a, Node* b) {
- return NewNode(machine()->Float64LessThan(), a, b);
+ return AddNode(machine()->Float64LessThan(), a, b);
}
Node* Float64LessThanOrEqual(Node* a, Node* b) {
- return NewNode(machine()->Float64LessThanOrEqual(), a, b);
+ return AddNode(machine()->Float64LessThanOrEqual(), a, b);
}
Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); }
Node* Float64GreaterThanOrEqual(Node* a, Node* b) {
@@ -357,106 +418,274 @@
// Conversions.
Node* ChangeFloat32ToFloat64(Node* a) {
- return NewNode(machine()->ChangeFloat32ToFloat64(), a);
+ return AddNode(machine()->ChangeFloat32ToFloat64(), a);
}
Node* ChangeInt32ToFloat64(Node* a) {
- return NewNode(machine()->ChangeInt32ToFloat64(), a);
+ return AddNode(machine()->ChangeInt32ToFloat64(), a);
}
Node* ChangeUint32ToFloat64(Node* a) {
- return NewNode(machine()->ChangeUint32ToFloat64(), a);
+ return AddNode(machine()->ChangeUint32ToFloat64(), a);
}
Node* ChangeFloat64ToInt32(Node* a) {
- return NewNode(machine()->ChangeFloat64ToInt32(), a);
+ return AddNode(machine()->ChangeFloat64ToInt32(), a);
}
Node* ChangeFloat64ToUint32(Node* a) {
- return NewNode(machine()->ChangeFloat64ToUint32(), a);
+ return AddNode(machine()->ChangeFloat64ToUint32(), a);
+ }
+ Node* TruncateFloat32ToInt64(Node* a) {
+ // TODO(ahaas): Remove this function once it is no longer used in
+ // WebAssembly.
+ return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
+ }
+ Node* TryTruncateFloat32ToInt64(Node* a) {
+ return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
+ }
+ Node* TruncateFloat64ToInt64(Node* a) {
+ // TODO(ahaas): Remove this function once it is no longer used in
+ // WebAssembly.
+ return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
+ }
+ Node* TryTruncateFloat64ToInt64(Node* a) {
+ return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
+ }
+ Node* TruncateFloat32ToUint64(Node* a) {
+ // TODO(ahaas): Remove this function once it is no longer used in
+ // WebAssembly.
+ return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
+ }
+ Node* TryTruncateFloat32ToUint64(Node* a) {
+ return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
+ }
+ Node* TruncateFloat64ToUint64(Node* a) {
+ // TODO(ahaas): Remove this function once it is no longer used in
+ // WebAssembly.
+ return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
+ }
+ Node* TryTruncateFloat64ToUint64(Node* a) {
+ return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
}
Node* ChangeInt32ToInt64(Node* a) {
- return NewNode(machine()->ChangeInt32ToInt64(), a);
+ return AddNode(machine()->ChangeInt32ToInt64(), a);
}
Node* ChangeUint32ToUint64(Node* a) {
- return NewNode(machine()->ChangeUint32ToUint64(), a);
+ return AddNode(machine()->ChangeUint32ToUint64(), a);
}
Node* TruncateFloat64ToFloat32(Node* a) {
- return NewNode(machine()->TruncateFloat64ToFloat32(), a);
+ return AddNode(machine()->TruncateFloat64ToFloat32(), a);
}
- Node* TruncateFloat64ToInt32(Node* a) {
- return NewNode(machine()->TruncateFloat64ToInt32(), a);
+ Node* TruncateFloat64ToInt32(TruncationMode mode, Node* a) {
+ return AddNode(machine()->TruncateFloat64ToInt32(mode), a);
}
Node* TruncateInt64ToInt32(Node* a) {
- return NewNode(machine()->TruncateInt64ToInt32(), a);
+ return AddNode(machine()->TruncateInt64ToInt32(), a);
}
- Node* Float64Floor(Node* a) { return NewNode(machine()->Float64Floor(), a); }
- Node* Float64Ceil(Node* a) { return NewNode(machine()->Float64Ceil(), a); }
+ Node* RoundInt64ToFloat32(Node* a) {
+ return AddNode(machine()->RoundInt64ToFloat32(), a);
+ }
+ Node* RoundInt64ToFloat64(Node* a) {
+ return AddNode(machine()->RoundInt64ToFloat64(), a);
+ }
+ Node* RoundUint64ToFloat32(Node* a) {
+ return AddNode(machine()->RoundUint64ToFloat32(), a);
+ }
+ Node* RoundUint64ToFloat64(Node* a) {
+ return AddNode(machine()->RoundUint64ToFloat64(), a);
+ }
+ Node* BitcastFloat32ToInt32(Node* a) {
+ return AddNode(machine()->BitcastFloat32ToInt32(), a);
+ }
+ Node* BitcastFloat64ToInt64(Node* a) {
+ return AddNode(machine()->BitcastFloat64ToInt64(), a);
+ }
+ Node* BitcastInt32ToFloat32(Node* a) {
+ return AddNode(machine()->BitcastInt32ToFloat32(), a);
+ }
+ Node* BitcastInt64ToFloat64(Node* a) {
+ return AddNode(machine()->BitcastInt64ToFloat64(), a);
+ }
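+ // Bitcasts reinterpret the underlying bits without any numeric conversion;
+ // for example (sketch, using the Float64Constant helper declared elsewhere
+ // in this class):
+ //
+ //   Node* bits = BitcastFloat64ToInt64(Float64Constant(1.0));
+ //   // bits is the int64 value 0x3FF0000000000000, the IEEE-754
+ //   // encoding of 1.0.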
+ Node* Float32RoundDown(Node* a) {
+ return AddNode(machine()->Float32RoundDown().op(), a);
+ }
+ Node* Float64RoundDown(Node* a) {
+ return AddNode(machine()->Float64RoundDown().op(), a);
+ }
+ Node* Float32RoundUp(Node* a) {
+ return AddNode(machine()->Float32RoundUp().op(), a);
+ }
+ Node* Float64RoundUp(Node* a) {
+ return AddNode(machine()->Float64RoundUp().op(), a);
+ }
+ Node* Float32RoundTruncate(Node* a) {
+ return AddNode(machine()->Float32RoundTruncate().op(), a);
+ }
Node* Float64RoundTruncate(Node* a) {
- return NewNode(machine()->Float64RoundTruncate(), a);
+ return AddNode(machine()->Float64RoundTruncate().op(), a);
}
Node* Float64RoundTiesAway(Node* a) {
- return NewNode(machine()->Float64RoundTiesAway(), a);
+ return AddNode(machine()->Float64RoundTiesAway().op(), a);
}
+ Node* Float32RoundTiesEven(Node* a) {
+ return AddNode(machine()->Float32RoundTiesEven().op(), a);
+ }
+ Node* Float64RoundTiesEven(Node* a) {
+ return AddNode(machine()->Float64RoundTiesEven().op(), a);
+ }
+
+ // Float64 bit operations.
+ Node* Float64ExtractLowWord32(Node* a) {
+ return AddNode(machine()->Float64ExtractLowWord32(), a);
+ }
+ Node* Float64ExtractHighWord32(Node* a) {
+ return AddNode(machine()->Float64ExtractHighWord32(), a);
+ }
+ Node* Float64InsertLowWord32(Node* a, Node* b) {
+ return AddNode(machine()->Float64InsertLowWord32(), a, b);
+ }
+ Node* Float64InsertHighWord32(Node* a, Node* b) {
+ return AddNode(machine()->Float64InsertHighWord32(), a, b);
+ }
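+ // The extract/insert pairs let a float64 be taken apart into and rebuilt
+ // from its two 32-bit halves, e.g. transplanting the sign/exponent word
+ // (illustrative sketch):
+ //
+ //   Node* hi = Float64ExtractHighWord32(src);
+ //   Node* merged = Float64InsertHighWord32(dst, hi);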
+
+ // Stack operations.
+ Node* LoadStackPointer() { return AddNode(machine()->LoadStackPointer()); }
+ Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); }
// Parameters.
Node* Parameter(size_t index);
+ // Pointer utilities.
+ Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
+ return Load(rep, PointerConstant(address), Int32Constant(offset));
+ }
+ Node* StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
+ return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
+ }
+ Node* StringConstant(const char* string) {
+ return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
+ }
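+ // These helpers wrap loads and stores against raw C++ addresses, e.g.
+ // incrementing an int32 counter that lives in C++ memory (sketch;
+ // `counter` is a hypothetical variable):
+ //
+ //   static int32_t counter = 0;
+ //   Node* old_value = LoadFromPointer(&counter, MachineType::Int32());
+ //   StoreToPointer(&counter, MachineRepresentation::kWord32,
+ //                  Int32Add(old_value, Int32Constant(1)));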
+
+ // Call a function with the given call descriptor and arguments.
+ Node* CallN(CallDescriptor* desc, Node* function, Node** args);
+ // Call a function with the given call descriptor, arguments, and frame
+ // state.
+ Node* CallNWithFrameState(CallDescriptor* desc, Node* function, Node** args,
+ Node* frame_state);
+ // Call to a runtime function with one argument.
+ Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context);
+ // Call to a runtime function with two arguments.
+ Node* CallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* context);
+ // Call to a runtime function with four arguments.
+ Node* CallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* context);
+ // Call to a C function with zero arguments.
+ Node* CallCFunction0(MachineType return_type, Node* function);
+ // Call to a C function with one argument.
+ Node* CallCFunction1(MachineType return_type, MachineType arg0_type,
+ Node* function, Node* arg0);
+ // Call to a C function with two arguments.
+ Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, Node* function, Node* arg0,
+ Node* arg1);
+ // Call to a C function with eight arguments.
+ Node* CallCFunction8(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type,
+ MachineType arg7_type, Node* function, Node* arg0,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, Node* arg6, Node* arg7);
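+ // For example, invoking an int32_t(int32_t) C helper (sketch; `SquareC` is
+ // a hypothetical function, and the function-pointer-to-void* cast mirrors
+ // how a raw code address would be fed to PointerConstant):
+ //
+ //   Node* fn = PointerConstant(reinterpret_cast<void*>(&SquareC));
+ //   Node* result =
+ //       CallCFunction1(MachineType::Int32(), MachineType::Int32(), fn, arg);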
+
+ // Tail call a function with the given call descriptor and arguments.
+ Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
+ // Tail call to a runtime function with one argument.
+ Node* TailCallRuntime1(Runtime::FunctionId function, Node* arg0,
+ Node* context);
+ // Tail call to a runtime function with two arguments.
+ Node* TailCallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* context);
+
+
+ // ===========================================================================
+ // The following utility methods deal with control flow, hence might switch
+ // the current basic block or create new basic blocks for labels.
+
// Control flow.
- Label* Exit();
- void Goto(Label* label);
- void Branch(Node* condition, Label* true_val, Label* false_val);
- // Call through CallFunctionStub with lazy deopt and frame-state.
- Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
- Node* frame_state, CallFunctionFlags flags);
- // Call to a JS function with zero parameters.
- Node* CallJS0(Node* function, Node* receiver, Node* context,
- Node* frame_state);
- // Call to a runtime function with zero parameters.
- Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context,
- Node* frame_state);
+ void Goto(RawMachineLabel* label);
+ void Branch(Node* condition, RawMachineLabel* true_val,
+ RawMachineLabel* false_val);
+ void Switch(Node* index, RawMachineLabel* default_label, int32_t* case_values,
+ RawMachineLabel** case_labels, size_t case_count);
void Return(Node* value);
- void Bind(Label* label);
+ void Return(Node* v1, Node* v2);
+ void Return(Node* v1, Node* v2, Node* v3);
+ void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
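+ // A small usage sketch (hypothetical): computing |x| with a diamond and a
+ // phi, where the phi inputs follow the predecessor order of the merge
+ // block (the fall-through false edge first, then the Goto edge):
+ //
+ //   RawMachineLabel is_neg, done;
+ //   Branch(Int32LessThan(x, Int32Constant(0)), &is_neg, &done);
+ //   Bind(&is_neg);
+ //   Node* negated = Int32Neg(x);
+ //   Goto(&done);
+ //   Bind(&done);
+ //   Node* abs = Phi(MachineRepresentation::kWord32, x, negated);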
// Variables.
- Node* Phi(MachineType type, Node* n1, Node* n2) {
- return NewNode(common()->Phi(type, 2), n1, n2);
+ Node* Phi(MachineRepresentation rep, Node* n1, Node* n2) {
+ return AddNode(common()->Phi(rep, 2), n1, n2);
}
- Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3) {
- return NewNode(common()->Phi(type, 3), n1, n2, n3);
+ Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3) {
+ return AddNode(common()->Phi(rep, 3), n1, n2, n3);
}
- Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3, Node* n4) {
- return NewNode(common()->Phi(type, 4), n1, n2, n3, n4);
+ Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3, Node* n4) {
+ return AddNode(common()->Phi(rep, 4), n1, n2, n3, n4);
}
- // MachineAssembler is invalid after export.
- Schedule* Export();
+ // ===========================================================================
+ // The following generic node creation methods can be used for operators that
+ // are not covered by the above utility methods. There should rarely be a need
+ // to do that outside of testing though.
- protected:
- Node* MakeNode(const Operator* op, int input_count, Node** inputs,
- bool incomplete) FINAL;
+ Node* AddNode(const Operator* op, int input_count, Node** inputs);
- bool ScheduleValid() { return schedule_ != NULL; }
+ Node* AddNode(const Operator* op) {
+ return AddNode(op, 0, static_cast<Node**>(nullptr));
+ }
- Schedule* schedule() {
- DCHECK(ScheduleValid());
- return schedule_;
+ template <class... TArgs>
+ Node* AddNode(const Operator* op, Node* n1, TArgs... args) {
+ Node* buffer[] = {n1, args...};
+ return AddNode(op, sizeof...(args) + 1, buffer);
}
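+ // The variadic overload simply packs its operands into an on-stack array
+ // and forwards to the array-based AddNode above, so (sketch):
+ //
+ //   AddNode(op, n1, n2, n3)
+ //   // is equivalent to:
+ //   Node* buffer[] = {n1, n2, n3};
+ //   AddNode(op, 3, buffer);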
private:
- BasicBlock* Use(Label* label);
- BasicBlock* EnsureBlock(Label* label);
+ Node* MakeNode(const Operator* op, int input_count, Node** inputs);
+ BasicBlock* Use(RawMachineLabel* label);
+ BasicBlock* EnsureBlock(RawMachineLabel* label);
BasicBlock* CurrentBlock();
+ Schedule* schedule() { return schedule_; }
+ size_t parameter_count() const { return machine_sig()->parameter_count(); }
+ const MachineSignature* machine_sig() const {
+ return call_descriptor_->GetMachineSignature();
+ }
+
+ Isolate* isolate_;
+ Graph* graph_;
Schedule* schedule_;
MachineOperatorBuilder machine_;
CommonOperatorBuilder common_;
- MachineSignature* machine_sig_;
CallDescriptor* call_descriptor_;
- Node** parameters_;
- Label exit_label_;
+ NodeVector parameters_;
BasicBlock* current_block_;
DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
};
+
+class RawMachineLabel final {
+ public:
+ RawMachineLabel();
+ ~RawMachineLabel();
+
+ private:
+ BasicBlock* block_;
+ bool used_;
+ bool bound_;
+ friend class RawMachineAssembler;
+ DISALLOW_COPY_AND_ASSIGN(RawMachineLabel);
+};
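+// A RawMachineLabel becomes "used" once any Goto/Branch/Switch targets it and
+// "bound" once Bind() gives it a basic block; the flags above track this so
+// that a dangling label can be detected. Typical lifetime (sketch, where m is
+// a RawMachineAssembler):
+//
+//   RawMachineLabel label;   // neither used nor bound
+//   m.Goto(&label);          // used: a control edge now targets it
+//   m.Bind(&label);          // bound: it now denotes a basic block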
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index dabfd59..463795e 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/bit-vector.h"
#include "src/compiler/instruction.h"
#include "src/compiler/register-allocator-verifier.h"
@@ -9,26 +10,46 @@
namespace internal {
namespace compiler {
-static size_t OperandCount(const Instruction* instr) {
+namespace {
+
+size_t OperandCount(const Instruction* instr) {
return instr->InputCount() + instr->OutputCount() + instr->TempCount();
}
-static void VerifyGapEmpty(const GapInstruction* gap) {
- for (int i = GapInstruction::FIRST_INNER_POSITION;
- i <= GapInstruction::LAST_INNER_POSITION; i++) {
- GapInstruction::InnerPosition inner_pos =
- static_cast<GapInstruction::InnerPosition>(i);
- CHECK_EQ(NULL, gap->GetParallelMove(inner_pos));
+void VerifyEmptyGaps(const Instruction* instr) {
+ for (int i = Instruction::FIRST_GAP_POSITION;
+ i <= Instruction::LAST_GAP_POSITION; i++) {
+ Instruction::GapPosition inner_pos =
+ static_cast<Instruction::GapPosition>(i);
+ CHECK(instr->GetParallelMove(inner_pos) == nullptr);
}
}
+void VerifyAllocatedGaps(const Instruction* instr) {
+ for (int i = Instruction::FIRST_GAP_POSITION;
+ i <= Instruction::LAST_GAP_POSITION; i++) {
+ Instruction::GapPosition inner_pos =
+ static_cast<Instruction::GapPosition>(i);
+ auto moves = instr->GetParallelMove(inner_pos);
+ if (moves == nullptr) continue;
+ for (auto move : *moves) {
+ if (move->IsRedundant()) continue;
+ CHECK(move->source().IsAllocated() || move->source().IsConstant());
+ CHECK(move->destination().IsAllocated());
+ }
+ }
+}
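+// Together these two checks bracket the allocator: VerifyEmptyGaps runs on
+// the unallocated code, where no parallel moves may exist yet, while
+// VerifyAllocatedGaps runs afterwards, when every surviving move must connect
+// allocated (or constant-source) operands.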
+
+} // namespace
+
+
void RegisterAllocatorVerifier::VerifyInput(
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
- if (constraint.type_ != kImmediate) {
- CHECK_NE(UnallocatedOperand::kInvalidVirtualRegister,
+ if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
+ CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
}
}
@@ -38,16 +59,16 @@
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
CHECK_NE(kImmediate, constraint.type_);
+ CHECK_NE(kExplicit, constraint.type_);
CHECK_NE(kConstant, constraint.type_);
- CHECK_EQ(UnallocatedOperand::kInvalidVirtualRegister,
- constraint.virtual_register_);
}
void RegisterAllocatorVerifier::VerifyOutput(
const OperandConstraint& constraint) {
CHECK_NE(kImmediate, constraint.type_);
- CHECK_NE(UnallocatedOperand::kInvalidVirtualRegister,
+ CHECK_NE(kExplicit, constraint.type_);
+ CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
}
@@ -61,9 +82,10 @@
// Construct OperandConstraints for all InstructionOperands, eliminating
// kSameAsFirst along the way.
for (const auto* instr : sequence->instructions()) {
+ // All gaps should be totally unallocated at this point.
+ VerifyEmptyGaps(instr);
const size_t operand_count = OperandCount(instr);
- auto* op_constraints =
- zone->NewArray<OperandConstraint>(static_cast<int>(operand_count));
+ auto* op_constraints = zone->NewArray<OperandConstraint>(operand_count);
size_t count = 0;
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
BuildConstraint(instr->InputAt(i), &op_constraints[count]);
@@ -82,11 +104,6 @@
}
VerifyOutput(op_constraints[count]);
}
- // All gaps should be totally unallocated at this point.
- if (instr->IsGapMoves()) {
- CHECK(operand_count == 0);
- VerifyGapEmpty(GapInstruction::cast(instr));
- }
InstructionConstraint instr_constraint = {instr, operand_count,
op_constraints};
constraints()->push_back(instr_constraint);
@@ -99,6 +116,8 @@
auto instr_it = sequence()->begin();
for (const auto& instr_constraint : *constraints()) {
const auto* instr = instr_constraint.instruction_;
+ // All gaps should be totally allocated at this point.
+ VerifyAllocatedGaps(instr);
const size_t operand_count = instr_constraint.operand_constaints_size_;
const auto* op_constraints = instr_constraint.operand_constraints_;
CHECK_EQ(instr, *instr_it);
@@ -121,36 +140,44 @@
void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
OperandConstraint* constraint) {
constraint->value_ = kMinInt;
- constraint->virtual_register_ = UnallocatedOperand::kInvalidVirtualRegister;
+ constraint->virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
if (op->IsConstant()) {
constraint->type_ = kConstant;
- constraint->value_ = ConstantOperand::cast(op)->index();
+ constraint->value_ = ConstantOperand::cast(op)->virtual_register();
constraint->virtual_register_ = constraint->value_;
+ } else if (op->IsExplicit()) {
+ constraint->type_ = kExplicit;
} else if (op->IsImmediate()) {
+ auto imm = ImmediateOperand::cast(op);
+ int value = imm->type() == ImmediateOperand::INLINE ? imm->inline_value()
+ : imm->indexed_value();
constraint->type_ = kImmediate;
- constraint->value_ = ImmediateOperand::cast(op)->index();
+ constraint->value_ = value;
} else {
CHECK(op->IsUnallocated());
const auto* unallocated = UnallocatedOperand::cast(op);
int vreg = unallocated->virtual_register();
constraint->virtual_register_ = vreg;
if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
- constraint->type_ = kFixedSlot;
+ constraint->type_ = sequence()->IsFloat(vreg) ? kDoubleSlot : kSlot;
constraint->value_ = unallocated->fixed_slot_index();
} else {
switch (unallocated->extended_policy()) {
case UnallocatedOperand::ANY:
- CHECK(false);
- break;
case UnallocatedOperand::NONE:
- if (sequence()->IsDouble(vreg)) {
+ if (sequence()->IsFloat(vreg)) {
constraint->type_ = kNoneDouble;
} else {
constraint->type_ = kNone;
}
break;
case UnallocatedOperand::FIXED_REGISTER:
- constraint->type_ = kFixedRegister;
+ if (unallocated->HasSecondaryStorage()) {
+ constraint->type_ = kRegisterAndSlot;
+ constraint->spilled_slot_ = unallocated->GetSecondaryStorage();
+ } else {
+ constraint->type_ = kFixedRegister;
+ }
constraint->value_ = unallocated->fixed_register_index();
break;
case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
@@ -158,12 +185,15 @@
constraint->value_ = unallocated->fixed_register_index();
break;
case UnallocatedOperand::MUST_HAVE_REGISTER:
- if (sequence()->IsDouble(vreg)) {
+ if (sequence()->IsFloat(vreg)) {
constraint->type_ = kDoubleRegister;
} else {
constraint->type_ = kRegister;
}
break;
+ case UnallocatedOperand::MUST_HAVE_SLOT:
+ constraint->type_ = sequence()->IsFloat(vreg) ? kDoubleSlot : kSlot;
+ break;
case UnallocatedOperand::SAME_AS_FIRST_INPUT:
constraint->type_ = kSameAsFirst;
break;
@@ -178,29 +208,47 @@
switch (constraint->type_) {
case kConstant:
CHECK(op->IsConstant());
- CHECK_EQ(op->index(), constraint->value_);
+ CHECK_EQ(ConstantOperand::cast(op)->virtual_register(),
+ constraint->value_);
return;
- case kImmediate:
+ case kImmediate: {
CHECK(op->IsImmediate());
- CHECK_EQ(op->index(), constraint->value_);
+ auto imm = ImmediateOperand::cast(op);
+ int value = imm->type() == ImmediateOperand::INLINE
+ ? imm->inline_value()
+ : imm->indexed_value();
+ CHECK_EQ(value, constraint->value_);
return;
+ }
case kRegister:
CHECK(op->IsRegister());
return;
- case kFixedRegister:
- CHECK(op->IsRegister());
- CHECK_EQ(op->index(), constraint->value_);
- return;
case kDoubleRegister:
CHECK(op->IsDoubleRegister());
return;
+ case kExplicit:
+ CHECK(op->IsExplicit());
+ return;
+ case kFixedRegister:
+ case kRegisterAndSlot:
+ CHECK(op->IsRegister());
+ CHECK_EQ(LocationOperand::cast(op)->GetRegister().code(),
+ constraint->value_);
+ return;
case kFixedDoubleRegister:
CHECK(op->IsDoubleRegister());
- CHECK_EQ(op->index(), constraint->value_);
+ CHECK_EQ(LocationOperand::cast(op)->GetDoubleRegister().code(),
+ constraint->value_);
return;
case kFixedSlot:
CHECK(op->IsStackSlot());
- CHECK_EQ(op->index(), constraint->value_);
+ CHECK_EQ(LocationOperand::cast(op)->index(), constraint->value_);
+ return;
+ case kSlot:
+ CHECK(op->IsStackSlot());
+ return;
+ case kDoubleSlot:
+ CHECK(op->IsDoubleStackSlot());
return;
case kNone:
CHECK(op->IsRegister() || op->IsStackSlot());
@@ -214,227 +262,447 @@
}
}
+namespace {
-class RegisterAllocatorVerifier::OutgoingMapping : public ZoneObject {
+typedef RpoNumber Rpo;
+
+static const int kInvalidVreg = InstructionOperand::kInvalidVirtualRegister;
+
+struct PhiData : public ZoneObject {
+ PhiData(Rpo definition_rpo, const PhiInstruction* phi, int first_pred_vreg,
+ const PhiData* first_pred_phi, Zone* zone)
+ : definition_rpo(definition_rpo),
+ virtual_register(phi->virtual_register()),
+ first_pred_vreg(first_pred_vreg),
+ first_pred_phi(first_pred_phi),
+ operands(zone) {
+ operands.reserve(phi->operands().size());
+ operands.insert(operands.begin(), phi->operands().begin(),
+ phi->operands().end());
+ }
+ const Rpo definition_rpo;
+ const int virtual_register;
+ const int first_pred_vreg;
+ const PhiData* first_pred_phi;
+ IntVector operands;
+};
+
+class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
public:
- struct OperandLess {
- bool operator()(const InstructionOperand* a,
- const InstructionOperand* b) const {
- if (a->kind() == b->kind()) return a->index() < b->index();
- return a->kind() < b->kind();
+ explicit PhiMap(Zone* zone) : ZoneMap<int, PhiData*>(zone) {}
+};
+
+struct OperandLess {
+ bool operator()(const InstructionOperand* a,
+ const InstructionOperand* b) const {
+ return a->CompareCanonicalized(*b);
+ }
+};
+
+class OperandMap : public ZoneObject {
+ public:
+ struct MapValue : public ZoneObject {
+ MapValue()
+ : incoming(nullptr),
+ define_vreg(kInvalidVreg),
+ use_vreg(kInvalidVreg),
+ succ_vreg(kInvalidVreg) {}
+ MapValue* incoming; // value from first predecessor block.
+ int define_vreg; // valid if this value was defined in this block.
+ int use_vreg; // valid if this value was used in this block.
+ int succ_vreg; // valid if propagated back from successor block.
+ };
+
+ class Map
+ : public ZoneMap<const InstructionOperand*, MapValue*, OperandLess> {
+ public:
+ explicit Map(Zone* zone)
+ : ZoneMap<const InstructionOperand*, MapValue*, OperandLess>(zone) {}
+
+ // Remove all entries with keys not in other.
+ void Intersect(const Map& other) {
+ if (this->empty()) return;
+ auto it = this->begin();
+ OperandLess less;
+ for (const auto& o : other) {
+ while (less(it->first, o.first)) {
+ this->erase(it++);
+ if (it == this->end()) return;
+ }
+ if (it->first->EqualsCanonicalized(*o.first)) {
+ ++it;
+ if (it == this->end()) return;
+ } else {
+ CHECK(less(o.first, it->first));
+ }
+ }
}
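+ // Intersect is a single linear merge over the two canonically-ordered
+ // maps, in the spirit of std::set_intersection; e.g. (sketch, by key):
+ //
+ //   this:  {r0, r2, s1}
+ //   other:     {r2, s1, s3}
+ //   after: {r2, s1}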
};
- typedef std::map<
- const InstructionOperand*, int, OperandLess,
- zone_allocator<std::pair<const InstructionOperand*, const int>>>
- LocationMap;
+ explicit OperandMap(Zone* zone) : map_(zone) {}
- explicit OutgoingMapping(Zone* zone)
- : locations_(LocationMap::key_compare(),
- LocationMap::allocator_type(zone)),
- predecessor_intersection_(LocationMap::key_compare(),
- LocationMap::allocator_type(zone)) {}
+ Map& map() { return map_; }
- LocationMap* locations() { return &locations_; }
-
- void RunPhis(const InstructionSequence* sequence,
- const InstructionBlock* block, size_t phi_index) {
- // This operation is only valid in edge split form.
- size_t predecessor_index = block->predecessors()[phi_index].ToSize();
- CHECK(sequence->instruction_blocks()[predecessor_index]->SuccessorCount() ==
- 1);
- for (const auto* phi : block->phis()) {
- auto input = phi->inputs()[phi_index];
- CHECK(locations()->find(input) != locations()->end());
- auto it = locations()->find(phi->output());
- CHECK(it != locations()->end());
- if (input->IsConstant()) {
- CHECK_EQ(it->second, input->index());
- } else {
- CHECK_EQ(it->second, phi->operands()[phi_index]);
- }
- it->second = phi->virtual_register();
+ void RunParallelMoves(Zone* zone, const ParallelMove* moves) {
+ // Compute outgoing mappings.
+ Map to_insert(zone);
+ for (auto move : *moves) {
+ if (move->IsEliminated()) continue;
+ auto cur = map().find(&move->source());
+ CHECK(cur != map().end());
+ auto res =
+ to_insert.insert(std::make_pair(&move->destination(), cur->second));
+ // Ensure injectivity of moves: no two moves may share a destination.
+ CHECK(res.second);
}
+ // Drop current mappings.
+ for (auto move : *moves) {
+ if (move->IsEliminated()) continue;
+ auto cur = map().find(&move->destination());
+ if (cur != map().end()) map().erase(cur);
+ }
+ // Insert new values.
+ map().insert(to_insert.begin(), to_insert.end());
}
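+ // Staging the update in three phases (resolve all sources, erase all
+ // destinations, insert the new bindings) makes the parallel-move semantics
+ // explicit, so swaps resolve correctly; e.g. (sketch):
+ //
+ //   before: {a: v1, b: v2},  moves: a->b, b->a
+ //   after:  {a: v2, b: v1}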
- void RunGapInstruction(Zone* zone, const GapInstruction* gap) {
- for (int i = GapInstruction::FIRST_INNER_POSITION;
- i <= GapInstruction::LAST_INNER_POSITION; i++) {
- GapInstruction::InnerPosition inner_pos =
- static_cast<GapInstruction::InnerPosition>(i);
- const ParallelMove* move = gap->GetParallelMove(inner_pos);
+ void RunGaps(Zone* zone, const Instruction* instr) {
+ for (int i = Instruction::FIRST_GAP_POSITION;
+ i <= Instruction::LAST_GAP_POSITION; i++) {
+ auto inner_pos = static_cast<Instruction::GapPosition>(i);
+ auto move = instr->GetParallelMove(inner_pos);
if (move == nullptr) continue;
RunParallelMoves(zone, move);
}
}
- void RunParallelMoves(Zone* zone, const ParallelMove* move) {
- // Compute outgoing mappings.
- LocationMap to_insert((LocationMap::key_compare()),
- LocationMap::allocator_type(zone));
- auto* moves = move->move_operands();
- for (auto i = moves->begin(); i != moves->end(); ++i) {
- if (i->IsEliminated()) continue;
- auto cur = locations()->find(i->source());
- CHECK(cur != locations()->end());
- to_insert.insert(std::make_pair(i->destination(), cur->second));
- }
- // Drop current mappings.
- for (auto i = moves->begin(); i != moves->end(); ++i) {
- if (i->IsEliminated()) continue;
- auto cur = locations()->find(i->destination());
- if (cur != locations()->end()) locations()->erase(cur);
- }
- // Insert new values.
- locations()->insert(to_insert.begin(), to_insert.end());
- }
-
- void Map(const InstructionOperand* op, int virtual_register) {
- locations()->insert(std::make_pair(op, virtual_register));
- }
-
void Drop(const InstructionOperand* op) {
- auto it = locations()->find(op);
- if (it != locations()->end()) locations()->erase(it);
+ auto it = map().find(op);
+ if (it != map().end()) map().erase(it);
}
void DropRegisters(const RegisterConfiguration* config) {
- for (int i = 0; i < config->num_general_registers(); ++i) {
- InstructionOperand op(InstructionOperand::REGISTER, i);
- Drop(&op);
- }
- for (int i = 0; i < config->num_double_registers(); ++i) {
- InstructionOperand op(InstructionOperand::DOUBLE_REGISTER, i);
- Drop(&op);
- }
- }
-
- void InitializeFromFirstPredecessor(const InstructionSequence* sequence,
- const OutgoingMappings* outgoing_mappings,
- const InstructionBlock* block) {
- if (block->predecessors().empty()) return;
- size_t predecessor_index = block->predecessors()[0].ToSize();
- CHECK(predecessor_index < block->rpo_number().ToSize());
- auto* incoming = outgoing_mappings->at(predecessor_index);
- if (block->PredecessorCount() > 1) {
- // Update incoming map with phis. The remaining phis will be checked later
- // as their mappings are not guaranteed to exist yet.
- incoming->RunPhis(sequence, block, 0);
- }
- // Now initialize outgoing mapping for this block with incoming mapping.
- CHECK(locations_.empty());
- locations_ = incoming->locations_;
- }
-
- void InitializeFromIntersection() { locations_ = predecessor_intersection_; }
-
- void InitializeIntersection(const OutgoingMapping* incoming) {
- CHECK(predecessor_intersection_.empty());
- predecessor_intersection_ = incoming->locations_;
- }
-
- void Intersect(const OutgoingMapping* other) {
- if (predecessor_intersection_.empty()) return;
- auto it = predecessor_intersection_.begin();
- OperandLess less;
- for (const auto& o : other->locations_) {
- while (less(it->first, o.first)) {
+ // TODO(dcarney): sort map by kind and drop range.
+ for (auto it = map().begin(); it != map().end();) {
+ auto op = it->first;
+ if (op->IsRegister() || op->IsDoubleRegister()) {
+ map().erase(it++);
+ } else {
++it;
- if (it == predecessor_intersection_.end()) return;
}
- if (it->first->Equals(o.first)) {
- if (o.second != it->second) {
- predecessor_intersection_.erase(it++);
- } else {
- ++it;
+ }
+ }
+
+ MapValue* Define(Zone* zone, const InstructionOperand* op,
+ int virtual_register) {
+ auto value = new (zone) MapValue();
+ value->define_vreg = virtual_register;
+ auto res = map().insert(std::make_pair(op, value));
+ if (!res.second) res.first->second = value;
+ return value;
+ }
+
+ void Use(const InstructionOperand* op, int use_vreg, bool initial_pass) {
+ auto it = map().find(op);
+ CHECK(it != map().end());
+ auto v = it->second;
+ if (v->define_vreg != kInvalidVreg) {
+ CHECK_EQ(v->define_vreg, use_vreg);
+ }
+ // Already used this vreg in this block.
+ if (v->use_vreg != kInvalidVreg) {
+ CHECK_EQ(v->use_vreg, use_vreg);
+ return;
+ }
+ if (!initial_pass) {
+ // A value may be defined and used in this block or the use must have
+ // propagated up.
+ if (v->succ_vreg != kInvalidVreg) {
+ CHECK_EQ(v->succ_vreg, use_vreg);
+ } else {
+ CHECK_EQ(v->define_vreg, use_vreg);
+ }
+ // Mark the use.
+ it->second->use_vreg = use_vreg;
+ return;
+ }
+ // Go up block list and ensure the correct definition is reached.
+ for (; v != nullptr; v = v->incoming) {
+ // Value unused in block.
+ if (v->define_vreg == kInvalidVreg && v->use_vreg == kInvalidVreg) {
+ continue;
+ }
+ // Found correct definition or use.
+ CHECK(v->define_vreg == use_vreg || v->use_vreg == use_vreg);
+ // Mark the use.
+ it->second->use_vreg = use_vreg;
+ return;
+ }
+ // Use of a non-phi value without definition.
+ CHECK(false);
+ }
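+ // On the initial pass the `incoming` links thread each value back through
+ // first-predecessor blocks, so the loop above scans that straight-line
+ // history for a matching define or use before giving up.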
+
+ void UsePhi(const InstructionOperand* op, const PhiData* phi,
+ bool initial_pass) {
+ auto it = map().find(op);
+ CHECK(it != map().end());
+ auto v = it->second;
+ int use_vreg = phi->virtual_register;
+ // Phis are not defined.
+ CHECK_EQ(kInvalidVreg, v->define_vreg);
+ // Already used this vreg in this block.
+ if (v->use_vreg != kInvalidVreg) {
+ CHECK_EQ(v->use_vreg, use_vreg);
+ return;
+ }
+ if (!initial_pass) {
+ // A used phi must have propagated its use to a predecessor.
+ CHECK_EQ(v->succ_vreg, use_vreg);
+ // Mark the use.
+ v->use_vreg = use_vreg;
+ return;
+ }
+ // Go up the block list starting at the first predecessor and ensure this
+ // phi has a correct use or definition.
+ for (v = v->incoming; v != nullptr; v = v->incoming) {
+ // Value unused in block.
+ if (v->define_vreg == kInvalidVreg && v->use_vreg == kInvalidVreg) {
+ continue;
+ }
+ // Found correct definition or use.
+ if (v->define_vreg != kInvalidVreg) {
+ CHECK(v->define_vreg == phi->first_pred_vreg);
+ } else if (v->use_vreg != phi->first_pred_vreg) {
+ // Walk the phi chain, hunting for a matching phi use.
+ auto p = phi;
+ for (; p != nullptr; p = p->first_pred_phi) {
+ if (p->virtual_register == v->use_vreg) break;
}
- if (it == predecessor_intersection_.end()) return;
+ CHECK(p);
+ }
+ // Mark the use.
+ it->second->use_vreg = use_vreg;
+ return;
+ }
+ // Use of a phi value without definition.
+ UNREACHABLE();
+ }
+
+ private:
+ Map map_;
+ DISALLOW_COPY_AND_ASSIGN(OperandMap);
+};
+
+} // namespace
+
+
+class RegisterAllocatorVerifier::BlockMaps {
+ public:
+ BlockMaps(Zone* zone, const InstructionSequence* sequence)
+ : zone_(zone),
+ sequence_(sequence),
+ phi_map_guard_(sequence->VirtualRegisterCount(), zone),
+ phi_map_(zone),
+ incoming_maps_(zone),
+ outgoing_maps_(zone) {
+ InitializePhis();
+ InitializeOperandMaps();
+ }
+
+ bool IsPhi(int virtual_register) {
+ return phi_map_guard_.Contains(virtual_register);
+ }
+
+ const PhiData* GetPhi(int virtual_register) {
+ auto it = phi_map_.find(virtual_register);
+ CHECK(it != phi_map_.end());
+ return it->second;
+ }
+
+ OperandMap* InitializeIncoming(size_t block_index, bool initial_pass) {
+ return initial_pass ? InitializeFromFirstPredecessor(block_index)
+ : InitializeFromIntersection(block_index);
+ }
+
+ void PropagateUsesBackwards() {
+ typedef std::set<size_t, std::greater<size_t>, zone_allocator<size_t>>
+ BlockIds;
+ BlockIds block_ids((BlockIds::key_compare()),
+ zone_allocator<size_t>(zone()));
+ // First ensure that incoming contains only keys in all predecessors.
+ for (auto block : sequence()->instruction_blocks()) {
+ size_t index = block->rpo_number().ToSize();
+ block_ids.insert(index);
+ auto& succ_map = incoming_maps_[index]->map();
+ for (size_t i = 0; i < block->PredecessorCount(); ++i) {
+ auto pred_rpo = block->predecessors()[i];
+ succ_map.Intersect(outgoing_maps_[pred_rpo.ToSize()]->map());
+ }
+ }
+ // Back propagation fixpoint.
+ while (!block_ids.empty()) {
+ // Pop highest block_id.
+ auto block_id_it = block_ids.begin();
+ const size_t succ_index = *block_id_it;
+ block_ids.erase(block_id_it);
+ // Propagate uses back to their definition blocks using succ_vreg.
+ auto block = sequence()->instruction_blocks()[succ_index];
+ auto& succ_map = incoming_maps_[succ_index]->map();
+ for (size_t i = 0; i < block->PredecessorCount(); ++i) {
+ for (auto& succ_val : succ_map) {
+ // An incoming map contains no defines.
+ CHECK_EQ(kInvalidVreg, succ_val.second->define_vreg);
+ // Compute succ_vreg.
+ int succ_vreg = succ_val.second->succ_vreg;
+ if (succ_vreg == kInvalidVreg) {
+ succ_vreg = succ_val.second->use_vreg;
+ // Initialize succ_vreg in back propagation chain.
+ succ_val.second->succ_vreg = succ_vreg;
+ }
+ if (succ_vreg == kInvalidVreg) continue;
+ // If succ_vreg is a phi defined in this block, switch to the value
+ // coming from this predecessor.
+ if (IsPhi(succ_vreg)) {
+ auto phi = GetPhi(succ_vreg);
+ if (phi->definition_rpo.ToSize() == succ_index) {
+ // phi definition block, transition to pred value.
+ succ_vreg = phi->operands[i];
+ }
+ }
+ // Push succ_vreg up to all predecessors.
+ auto pred_rpo = block->predecessors()[i];
+ auto& pred_map = outgoing_maps_[pred_rpo.ToSize()]->map();
+ auto& pred_val = *pred_map.find(succ_val.first);
+ if (pred_val.second->use_vreg != kInvalidVreg) {
+ CHECK_EQ(succ_vreg, pred_val.second->use_vreg);
+ }
+ if (pred_val.second->define_vreg != kInvalidVreg) {
+ CHECK_EQ(succ_vreg, pred_val.second->define_vreg);
+ }
+ if (pred_val.second->succ_vreg != kInvalidVreg) {
+ CHECK_EQ(succ_vreg, pred_val.second->succ_vreg);
+ } else {
+ pred_val.second->succ_vreg = succ_vreg;
+ block_ids.insert(pred_rpo.ToSize());
+ }
+ }
+ }
+ }
+ // Clear uses and back links for second pass.
+ for (auto operand_map : incoming_maps_) {
+ for (auto& succ_val : operand_map->map()) {
+ succ_val.second->incoming = nullptr;
+ succ_val.second->use_vreg = kInvalidVreg;
}
}
}
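+ // The worklist is ordered by descending RPO number, so uses normally flow
+ // from a block to its earlier predecessors in one sweep; a back edge
+ // re-enqueues its later predecessor, and iteration continues until no
+ // succ_vreg changes, i.e. a conventional backward dataflow fixpoint.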
private:
- LocationMap locations_;
- LocationMap predecessor_intersection_;
+ OperandMap* InitializeFromFirstPredecessor(size_t block_index) {
+ auto to_init = outgoing_maps_[block_index];
+ CHECK(to_init->map().empty());
+ auto block = sequence()->instruction_blocks()[block_index];
+ if (block->predecessors().empty()) return to_init;
+ size_t predecessor_index = block->predecessors()[0].ToSize();
+ // Ensure not a backedge.
+ CHECK(predecessor_index < block->rpo_number().ToSize());
+ auto incoming = outgoing_maps_[predecessor_index];
+ // Copy map and replace values.
+ to_init->map() = incoming->map();
+ for (auto& it : to_init->map()) {
+ auto incoming = it.second;
+ it.second = new (zone()) OperandMap::MapValue();
+ it.second->incoming = incoming;
+ }
+ // Copy to incoming map for second pass.
+ incoming_maps_[block_index]->map() = to_init->map();
+ return to_init;
+ }
- DISALLOW_COPY_AND_ASSIGN(OutgoingMapping);
+ OperandMap* InitializeFromIntersection(size_t block_index) {
+ return incoming_maps_[block_index];
+ }
+
+ void InitializeOperandMaps() {
+ size_t block_count = sequence()->instruction_blocks().size();
+ incoming_maps_.reserve(block_count);
+ outgoing_maps_.reserve(block_count);
+ for (size_t i = 0; i < block_count; ++i) {
+ incoming_maps_.push_back(new (zone()) OperandMap(zone()));
+ outgoing_maps_.push_back(new (zone()) OperandMap(zone()));
+ }
+ }
+
+ void InitializePhis() {
+ const size_t block_count = sequence()->instruction_blocks().size();
+ for (size_t block_index = 0; block_index < block_count; ++block_index) {
+ const auto block = sequence()->instruction_blocks()[block_index];
+ for (auto phi : block->phis()) {
+ int first_pred_vreg = phi->operands()[0];
+ const PhiData* first_pred_phi = nullptr;
+ if (IsPhi(first_pred_vreg)) {
+ first_pred_phi = GetPhi(first_pred_vreg);
+ first_pred_vreg = first_pred_phi->first_pred_vreg;
+ }
+ CHECK(!IsPhi(first_pred_vreg));
+ auto phi_data = new (zone()) PhiData(
+ block->rpo_number(), phi, first_pred_vreg, first_pred_phi, zone());
+ auto res =
+ phi_map_.insert(std::make_pair(phi->virtual_register(), phi_data));
+ CHECK(res.second);
+ phi_map_guard_.Add(phi->virtual_register());
+ }
+ }
+ }
+
+ typedef ZoneVector<OperandMap*> OperandMaps;
+ typedef ZoneVector<PhiData*> PhiVector;
+
+ Zone* zone() const { return zone_; }
+ const InstructionSequence* sequence() const { return sequence_; }
+
+ Zone* const zone_;
+ const InstructionSequence* const sequence_;
+ BitVector phi_map_guard_;
+ PhiMap phi_map_;
+ OperandMaps incoming_maps_;
+ OperandMaps outgoing_maps_;
};
-// Verify that all gap moves move the operands for a virtual register into the
-// correct location for every instruction.
void RegisterAllocatorVerifier::VerifyGapMoves() {
- typedef ZoneVector<OutgoingMapping*> OutgoingMappings;
- OutgoingMappings outgoing_mappings(
- static_cast<int>(sequence()->instruction_blocks().size()), nullptr,
- zone());
- // Construct all mappings, ignoring back edges and multiple entries.
- ConstructOutgoingMappings(&outgoing_mappings, true);
- // Run all remaining phis and compute the intersection of all predecessor
- // mappings.
- for (const auto* block : sequence()->instruction_blocks()) {
- if (block->PredecessorCount() == 0) continue;
- const size_t block_index = block->rpo_number().ToSize();
- auto* mapping = outgoing_mappings[block_index];
- bool initialized = false;
- // Walk predecessors in reverse to ensure Intersect is correctly working.
- // If it did nothing, the second pass would do exactly what the first pass
- // did.
- for (size_t phi_input = block->PredecessorCount() - 1; true; --phi_input) {
- const size_t pred_block_index = block->predecessors()[phi_input].ToSize();
- auto* incoming = outgoing_mappings[pred_block_index];
- if (phi_input != 0) incoming->RunPhis(sequence(), block, phi_input);
- if (!initialized) {
- mapping->InitializeIntersection(incoming);
- initialized = true;
- } else {
- mapping->Intersect(incoming);
- }
- if (phi_input == 0) break;
- }
- }
- // Construct all mappings again, this time using the intersection mapping
- // above as the incoming mapping instead of the result from the first
- // predecessor.
- ConstructOutgoingMappings(&outgoing_mappings, false);
+ BlockMaps block_maps(zone(), sequence());
+ VerifyGapMoves(&block_maps, true);
+ block_maps.PropagateUsesBackwards();
+ VerifyGapMoves(&block_maps, false);
}
-void RegisterAllocatorVerifier::ConstructOutgoingMappings(
- OutgoingMappings* outgoing_mappings, bool initial_pass) {
- // Compute the locations of all virtual registers leaving every block, using
- // only the first predecessor as source for the input mapping.
- for (const auto* block : sequence()->instruction_blocks()) {
- const size_t block_index = block->rpo_number().ToSize();
- auto* current = outgoing_mappings->at(block_index);
- CHECK(initial_pass == (current == nullptr));
- // Initialize current.
- if (!initial_pass) {
- // Skip check second time around for blocks without multiple predecessors
- // as we have already executed this in the initial run.
- if (block->PredecessorCount() <= 1) continue;
- current->InitializeFromIntersection();
- } else {
- current = new (zone()) OutgoingMapping(zone());
- outgoing_mappings->at(block_index) = current;
- // Copy outgoing values from predecessor block.
- current->InitializeFromFirstPredecessor(sequence(), outgoing_mappings,
- block);
- }
- // Update current with gaps and operands for all instructions in block.
+// Compute and verify outgoing values for every block.
+void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
+ bool initial_pass) {
+ const size_t block_count = sequence()->instruction_blocks().size();
+ for (size_t block_index = 0; block_index < block_count; ++block_index) {
+ auto current = block_maps->InitializeIncoming(block_index, initial_pass);
+ const auto block = sequence()->instruction_blocks()[block_index];
for (int instr_index = block->code_start(); instr_index < block->code_end();
++instr_index) {
const auto& instr_constraint = constraints_[instr_index];
- const auto* instr = instr_constraint.instruction_;
- const auto* op_constraints = instr_constraint.operand_constraints_;
+ const auto instr = instr_constraint.instruction_;
+ current->RunGaps(zone(), instr);
+ const auto op_constraints = instr_constraint.operand_constraints_;
size_t count = 0;
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
- if (op_constraints[count].type_ == kImmediate) continue;
- auto it = current->locations()->find(instr->InputAt(i));
+ if (op_constraints[count].type_ == kImmediate ||
+ op_constraints[count].type_ == kExplicit) {
+ continue;
+ }
int virtual_register = op_constraints[count].virtual_register_;
- CHECK(it != current->locations()->end());
- CHECK_EQ(it->second, virtual_register);
+ auto op = instr->InputAt(i);
+ if (!block_maps->IsPhi(virtual_register)) {
+ current->Use(op, virtual_register, initial_pass);
+ } else {
+ auto phi = block_maps->GetPhi(virtual_register);
+ current->UsePhi(op, phi, initial_pass);
+ }
}
for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
current->Drop(instr->TempAt(i));
@@ -443,13 +711,21 @@
current->DropRegisters(config());
}
for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
- current->Drop(instr->OutputAt(i));
int virtual_register = op_constraints[count].virtual_register_;
- current->Map(instr->OutputAt(i), virtual_register);
- }
- if (instr->IsGapMoves()) {
- const auto* gap = GapInstruction::cast(instr);
- current->RunGapInstruction(zone(), gap);
+ OperandMap::MapValue* value =
+ current->Define(zone(), instr->OutputAt(i), virtual_register);
+ if (op_constraints[count].type_ == kRegisterAndSlot) {
+ const AllocatedOperand* reg_op =
+ AllocatedOperand::cast(instr->OutputAt(i));
+ MachineRepresentation rep = reg_op->representation();
+ const AllocatedOperand* stack_op = AllocatedOperand::New(
+ zone(), LocationOperand::LocationKind::STACK_SLOT, rep,
+ op_constraints[count].spilled_slot_);
+ auto insert_result =
+ current->map().insert(std::make_pair(stack_op, value));
+ DCHECK(insert_result.second);
+ USE(insert_result);
+ }
}
}
}
diff --git a/src/compiler/register-allocator-verifier.h b/src/compiler/register-allocator-verifier.h
index 4e35dc2..f3ab54f 100644
--- a/src/compiler/register-allocator-verifier.h
+++ b/src/compiler/register-allocator-verifier.h
@@ -5,7 +5,6 @@
#ifndef V8_REGISTER_ALLOCATOR_VERIFIER_H_
#define V8_REGISTER_ALLOCATOR_VERIFIER_H_
-#include "src/v8.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -15,7 +14,7 @@
class InstructionOperand;
class InstructionSequence;
-class RegisterAllocatorVerifier FINAL : public ZoneObject {
+class RegisterAllocatorVerifier final : public ZoneObject {
public:
RegisterAllocatorVerifier(Zone* zone, const RegisterConfiguration* config,
const InstructionSequence* sequence);
@@ -31,15 +30,20 @@
kFixedRegister,
kDoubleRegister,
kFixedDoubleRegister,
+ kSlot,
+ kDoubleSlot,
kFixedSlot,
kNone,
kNoneDouble,
- kSameAsFirst
+ kExplicit,
+ kSameAsFirst,
+ kRegisterAndSlot
};
struct OperandConstraint {
ConstraintType type_;
int value_; // subkind index when relevant
+ int spilled_slot_;
int virtual_register_;
};
@@ -49,10 +53,9 @@
OperandConstraint* operand_constraints_;
};
- class OutgoingMapping;
+ class BlockMaps;
typedef ZoneVector<InstructionConstraint> Constraints;
- typedef ZoneVector<OutgoingMapping*> OutgoingMappings;
Zone* zone() const { return zone_; }
const RegisterConfiguration* config() { return config_; }
@@ -68,8 +71,7 @@
void CheckConstraint(const InstructionOperand* op,
const OperandConstraint* constraint);
- void ConstructOutgoingMappings(OutgoingMappings* outgoing_mappings,
- bool initial_pass);
+ void VerifyGapMoves(BlockMaps* outgoing_mappings, bool initial_pass);
Zone* const zone_;
const RegisterConfiguration* config_;
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 9eb4a47..232ad9f 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/adapters.h"
#include "src/compiler/linkage.h"
#include "src/compiler/register-allocator.h"
#include "src/string-stream.h"
@@ -10,198 +11,343 @@
namespace internal {
namespace compiler {
-static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
- return a.Value() < b.Value() ? a : b;
-}
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+ } while (false)
-static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
- return a.Value() > b.Value() ? a : b;
-}
+namespace {
-
-static void TraceAlloc(const char* msg, ...) {
- if (FLAG_trace_alloc) {
- va_list arguments;
- va_start(arguments, msg);
- base::OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
-
-
-static void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
+void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
auto it = std::find(v->begin(), v->end(), range);
DCHECK(it != v->end());
v->erase(it);
}
-UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
- InstructionOperand* hint)
- : operand_(operand),
- hint_(hint),
- pos_(pos),
- next_(nullptr),
- requires_reg_(false),
- register_beneficial_(true) {
- if (operand_ != nullptr && operand_->IsUnallocated()) {
- const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
- requires_reg_ = unalloc->HasRegisterPolicy();
- register_beneficial_ = !unalloc->HasAnyPolicy();
- }
- DCHECK(pos_.IsValid());
+int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
+ return kind == DOUBLE_REGISTERS ? cfg->num_double_registers()
+ : cfg->num_general_registers();
}
-bool UsePosition::HasHint() const {
- return hint_ != nullptr && !hint_->IsUnallocated();
+int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
+ RegisterKind kind) {
+ return kind == DOUBLE_REGISTERS
+ ? cfg->num_allocatable_aliased_double_registers()
+ : cfg->num_allocatable_general_registers();
}
-bool UsePosition::RequiresRegister() const { return requires_reg_; }
-
-
-bool UsePosition::RegisterIsBeneficial() const { return register_beneficial_; }
-
-
-void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
- DCHECK(Contains(pos) && pos.Value() != start().Value());
- auto after = new (zone) UseInterval(pos, end_);
- after->next_ = next_;
- next_ = after;
- end_ = pos;
+const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
+ RegisterKind kind) {
+ return kind == DOUBLE_REGISTERS ? cfg->allocatable_double_codes()
+ : cfg->allocatable_general_codes();
}
-struct LiveRange::SpillAtDefinitionList : ZoneObject {
- SpillAtDefinitionList(int gap_index, InstructionOperand* operand,
- SpillAtDefinitionList* next)
- : gap_index(gap_index), operand(operand), next(next) {}
- const int gap_index;
- InstructionOperand* const operand;
- SpillAtDefinitionList* const next;
-};
-
-
-#ifdef DEBUG
-
-
-void LiveRange::Verify() const {
- UsePosition* cur = first_pos_;
- while (cur != nullptr) {
- DCHECK(Start().Value() <= cur->pos().Value() &&
- cur->pos().Value() <= End().Value());
- cur = cur->next();
- }
+const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
+ const InstructionBlock* block) {
+ RpoNumber index = block->loop_header();
+ if (!index.IsValid()) return nullptr;
+ return sequence->InstructionBlockAt(index);
}
-bool LiveRange::HasOverlap(UseInterval* target) const {
- UseInterval* current_interval = first_interval_;
- while (current_interval != nullptr) {
- // Intervals overlap if the start of one is contained in the other.
- if (current_interval->Contains(target->start()) ||
- target->Contains(current_interval->start())) {
+const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
+ LifetimePosition pos) {
+ return code->GetInstructionBlock(pos.ToInstructionIndex());
+}
+
+
+Instruction* GetLastInstruction(InstructionSequence* code,
+ const InstructionBlock* block) {
+ return code->InstructionAt(block->last_instruction_index());
+}
+
+
+bool IsOutputRegisterOf(Instruction* instr, Register reg) {
+ for (size_t i = 0; i < instr->OutputCount(); i++) {
+ InstructionOperand* output = instr->OutputAt(i);
+ if (output->IsRegister() &&
+ LocationOperand::cast(output)->GetRegister().is(reg)) {
return true;
}
- current_interval = current_interval->next();
}
return false;
}
-#endif
+bool IsOutputDoubleRegisterOf(Instruction* instr, DoubleRegister reg) {
+ for (size_t i = 0; i < instr->OutputCount(); i++) {
+ InstructionOperand* output = instr->OutputAt(i);
+ if (output->IsDoubleRegister() &&
+ LocationOperand::cast(output)->GetDoubleRegister().is(reg)) {
+ return true;
+ }
+ }
+ return false;
+}
-LiveRange::LiveRange(int id, Zone* zone)
- : id_(id),
- spilled_(false),
- is_phi_(false),
- is_non_loop_phi_(false),
- kind_(UNALLOCATED_REGISTERS),
- assigned_register_(kInvalidAssignment),
+// TODO(dcarney): fix frame to allow frame accesses to half-size locations.
+int GetByteWidth(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kTagged:
+ return kPointerSize;
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat64:
+ return 8;
+ case MachineRepresentation::kNone:
+ break;
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+} // namespace
+
+
+UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
+ void* hint, UsePositionHintType hint_type)
+ : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) {
+ DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone);
+ bool register_beneficial = true;
+ UsePositionType type = UsePositionType::kAny;
+ if (operand_ != nullptr && operand_->IsUnallocated()) {
+ const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
+ if (unalloc->HasRegisterPolicy()) {
+ type = UsePositionType::kRequiresRegister;
+ } else if (unalloc->HasSlotPolicy()) {
+ type = UsePositionType::kRequiresSlot;
+ register_beneficial = false;
+ } else {
+ register_beneficial = !unalloc->HasAnyPolicy();
+ }
+ }
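+  // Pack the computed use type, hint kind and register-beneficial bit into
+  // the single flags_ word; the assigned register starts out unset.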
+ flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) |
+ RegisterBeneficialField::encode(register_beneficial) |
+ AssignedRegisterField::encode(kUnassignedRegister);
+ DCHECK(pos_.IsValid());
+}
+
+
+bool UsePosition::HasHint() const {
+ int hint_register;
+ return HintRegister(&hint_register);
+}
+
+
+bool UsePosition::HintRegister(int* register_code) const {
+ if (hint_ == nullptr) return false;
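+  // hint_ is a type-erased pointer; HintTypeField records what it actually
+  // points to, so each case below casts it accordingly.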
+ switch (HintTypeField::decode(flags_)) {
+ case UsePositionHintType::kNone:
+ case UsePositionHintType::kUnresolved:
+ return false;
+ case UsePositionHintType::kUsePos: {
+ UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
+ int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
+ if (assigned_register == kUnassignedRegister) return false;
+ *register_code = assigned_register;
+ return true;
+ }
+ case UsePositionHintType::kOperand: {
+ InstructionOperand* operand =
+ reinterpret_cast<InstructionOperand*>(hint_);
+ int assigned_register =
+ operand->IsRegister()
+ ? LocationOperand::cast(operand)->GetRegister().code()
+ : LocationOperand::cast(operand)->GetDoubleRegister().code();
+ *register_code = assigned_register;
+ return true;
+ }
+ case UsePositionHintType::kPhi: {
+ RegisterAllocationData::PhiMapValue* phi =
+ reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
+ int assigned_register = phi->assigned_register();
+ if (assigned_register == kUnassignedRegister) return false;
+ *register_code = assigned_register;
+ return true;
+ }
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+UsePositionHintType UsePosition::HintTypeForOperand(
+ const InstructionOperand& op) {
+ switch (op.kind()) {
+ case InstructionOperand::CONSTANT:
+ case InstructionOperand::IMMEDIATE:
+ case InstructionOperand::EXPLICIT:
+ return UsePositionHintType::kNone;
+ case InstructionOperand::UNALLOCATED:
+ return UsePositionHintType::kUnresolved;
+ case InstructionOperand::ALLOCATED:
+ if (op.IsRegister() || op.IsDoubleRegister()) {
+ return UsePositionHintType::kOperand;
+ } else {
+ DCHECK(op.IsStackSlot() || op.IsDoubleStackSlot());
+ return UsePositionHintType::kNone;
+ }
+ case InstructionOperand::INVALID:
+ break;
+ }
+ UNREACHABLE();
+ return UsePositionHintType::kNone;
+}
+
+
+void UsePosition::ResolveHint(UsePosition* use_pos) {
+ DCHECK_NOT_NULL(use_pos);
+ if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return;
+ hint_ = use_pos;
+ flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
+}
+
+
+void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
+ DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
+ DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
+ flags_ = TypeField::encode(type) |
+ RegisterBeneficialField::encode(register_beneficial) |
+ HintTypeField::encode(HintTypeField::decode(flags_)) |
+ AssignedRegisterField::encode(kUnassignedRegister);
+}
+
+
+UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
+ DCHECK(Contains(pos) && pos != start());
+ UseInterval* after = new (zone) UseInterval(pos, end_);
+ after->next_ = next_;
+ next_ = nullptr;
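+  // Deliberately leave the tail detached; the caller decides where to link
+  // the returned interval (e.g. into a split child's chain).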
+ end_ = pos;
+ return after;
+}
+
+
+void LifetimePosition::Print() const {
+ OFStream os(stdout);
+ os << *this << std::endl;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
+ os << '@' << pos.ToInstructionIndex();
+ if (pos.IsGapPosition()) {
+ os << 'g';
+ } else {
+ os << 'i';
+ }
+ if (pos.IsStart()) {
+ os << 's';
+ } else {
+ os << 'e';
+ }
+ return os;
+}
+
+
+const float LiveRange::kInvalidWeight = -1;
+const float LiveRange::kMaxWeight = std::numeric_limits<float>::max();
+
+
+LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
+ TopLevelLiveRange* top_level)
+ : relative_id_(relative_id),
+ bits_(0),
last_interval_(nullptr),
first_interval_(nullptr),
first_pos_(nullptr),
- parent_(nullptr),
+ top_level_(top_level),
next_(nullptr),
current_interval_(nullptr),
last_processed_use_(nullptr),
- current_hint_operand_(nullptr),
- spill_start_index_(kMaxInt),
- spill_type_(SpillType::kNoSpillType),
- spill_operand_(nullptr),
- spills_at_definition_(nullptr) {}
-
-
-void LiveRange::set_assigned_register(int reg, Zone* zone) {
- DCHECK(!HasRegisterAssigned() && !IsSpilled());
- assigned_register_ = reg;
- // TODO(dcarney): stop aliasing hint operands.
- ConvertUsesToOperand(CreateAssignedOperand(zone));
+ current_hint_position_(nullptr),
+ splitting_pointer_(nullptr),
+ size_(kInvalidSize),
+ weight_(kInvalidWeight),
+ group_(nullptr) {
+ DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
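+  // The representation and the assigned register share the bits_ field;
+  // start with no register assigned.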
+ bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
+ RepresentationField::encode(rep);
}
-void LiveRange::MakeSpilled() {
- DCHECK(!IsSpilled());
- DCHECK(!TopLevel()->HasNoSpillType());
- spilled_ = true;
- assigned_register_ = kInvalidAssignment;
-}
-
-
-void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
- InstructionOperand* operand) {
- DCHECK(HasNoSpillType());
- spills_at_definition_ = new (zone)
- SpillAtDefinitionList(gap_index, operand, spills_at_definition_);
-}
-
-
-void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
- InstructionOperand* op) {
- auto to_spill = TopLevel()->spills_at_definition_;
- if (to_spill == nullptr) return;
- auto zone = sequence->zone();
- for (; to_spill != nullptr; to_spill = to_spill->next) {
- auto gap = sequence->GapAt(to_spill->gap_index);
- auto move = gap->GetOrCreateParallelMove(GapInstruction::START, zone);
- move->AddMove(to_spill->operand, op, zone);
+void LiveRange::VerifyPositions() const {
+ // Walk the positions, verifying that each is in an interval.
+ UseInterval* interval = first_interval_;
+ for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
+ CHECK(Start() <= pos->pos());
+ CHECK(pos->pos() <= End());
+ CHECK_NOT_NULL(interval);
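+    // Advance to the interval covering this position; a use exactly at an
+    // interval's end is also acceptable.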
+ while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
+ interval = interval->next();
+ CHECK_NOT_NULL(interval);
+ }
}
- TopLevel()->spills_at_definition_ = nullptr;
}
-void LiveRange::SetSpillOperand(InstructionOperand* operand) {
- DCHECK(HasNoSpillType());
- DCHECK(!operand->IsUnallocated());
- spill_type_ = SpillType::kSpillOperand;
- spill_operand_ = operand;
+void LiveRange::VerifyIntervals() const {
+ DCHECK(first_interval()->start() == Start());
+ LifetimePosition last_end = first_interval()->end();
+ for (UseInterval* interval = first_interval()->next(); interval != nullptr;
+ interval = interval->next()) {
+ DCHECK(last_end <= interval->start());
+ last_end = interval->end();
+ }
+ DCHECK(last_end == End());
}
-void LiveRange::SetSpillRange(SpillRange* spill_range) {
- DCHECK(HasNoSpillType() || HasSpillRange());
- DCHECK_NE(spill_range, nullptr);
- spill_type_ = SpillType::kSpillRange;
- spill_range_ = spill_range;
+void LiveRange::set_assigned_register(int reg) {
+ DCHECK(!HasRegisterAssigned() && !spilled());
+ bits_ = AssignedRegisterField::update(bits_, reg);
}
-void LiveRange::CommitSpillOperand(InstructionOperand* operand) {
- DCHECK(HasSpillRange());
- DCHECK(!operand->IsUnallocated());
- DCHECK(!IsChild());
- spill_type_ = SpillType::kSpillOperand;
- spill_operand_ = operand;
+void LiveRange::UnsetAssignedRegister() {
+ DCHECK(HasRegisterAssigned() && !spilled());
+ bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
}
-UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
+void LiveRange::Spill() {
+ DCHECK(!spilled());
+ DCHECK(!TopLevel()->HasNoSpillType());
+ set_spilled(true);
+ bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
+}
+
+
+RegisterKind LiveRange::kind() const {
+ return IsFloatingPoint(representation()) ? DOUBLE_REGISTERS
+ : GENERAL_REGISTERS;
+}
+
+
+UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
+ for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
+ if (pos->HintRegister(register_index)) return pos;
+ }
+ return nullptr;
+}
+
+
+UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
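+  // last_processed_use_ caches the previous search result; it is only valid
+  // when scanning forward from it, so otherwise restart from the head.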
UsePosition* use_pos = last_processed_use_;
- if (use_pos == nullptr) use_pos = first_pos();
- while (use_pos != nullptr && use_pos->pos().Value() < start.Value()) {
+ if (use_pos == nullptr || use_pos->pos() > start) {
+ use_pos = first_pos();
+ }
+ while (use_pos != nullptr && use_pos->pos() < start) {
use_pos = use_pos->next();
}
last_processed_use_ = use_pos;
@@ -210,7 +356,7 @@
UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
- LifetimePosition start) {
+ LifetimePosition start) const {
UsePosition* pos = NextUsePosition(start);
while (pos != nullptr && !pos->RegisterIsBeneficial()) {
pos = pos->next();
@@ -220,10 +366,10 @@
UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
- LifetimePosition start) {
- auto pos = first_pos();
+ LifetimePosition start) const {
+ UsePosition* pos = first_pos();
UsePosition* prev = nullptr;
- while (pos != nullptr && pos->pos().Value() < start.Value()) {
+ while (pos != nullptr && pos->pos() < start) {
if (pos->RegisterIsBeneficial()) prev = pos;
pos = pos->next();
}
@@ -231,53 +377,58 @@
}
-UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
+UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
UsePosition* pos = NextUsePosition(start);
- while (pos != nullptr && !pos->RequiresRegister()) {
+ while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
pos = pos->next();
}
return pos;
}
-bool LiveRange::CanBeSpilled(LifetimePosition pos) {
- // We cannot spill a live range that has a use requiring a register
- // at the current or the immediate next position.
- auto use_pos = NextRegisterPosition(pos);
- if (use_pos == nullptr) return true;
- return use_pos->pos().Value() >
- pos.NextInstruction().InstructionEnd().Value();
+UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
+ for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
+ pos = pos->next()) {
+ if (pos->type() != UsePositionType::kRequiresSlot) continue;
+ return pos;
+ }
+ return nullptr;
}
-InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) const {
- InstructionOperand* op = nullptr;
+bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
+ // We cannot spill a live range that has a use requiring a register
+ // at the current or the immediate next position.
+ UsePosition* use_pos = NextRegisterPosition(pos);
+ if (use_pos == nullptr) return true;
+ return use_pos->pos() > pos.NextStart().End();
+}
+
+
+bool LiveRange::IsTopLevel() const { return top_level_ == this; }
+
+
+InstructionOperand LiveRange::GetAssignedOperand() const {
if (HasRegisterAssigned()) {
- DCHECK(!IsSpilled());
- switch (Kind()) {
- case GENERAL_REGISTERS:
- op = RegisterOperand::Create(assigned_register(), zone);
- break;
- case DOUBLE_REGISTERS:
- op = DoubleRegisterOperand::Create(assigned_register(), zone);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- DCHECK(IsSpilled());
- DCHECK(!HasRegisterAssigned());
- op = TopLevel()->GetSpillOperand();
- DCHECK(!op->IsUnallocated());
+ DCHECK(!spilled());
+ return AllocatedOperand(LocationOperand::REGISTER, representation(),
+ assigned_register());
}
- return op;
+ DCHECK(spilled());
+ DCHECK(!HasRegisterAssigned());
+ if (TopLevel()->HasSpillOperand()) {
+ InstructionOperand* op = TopLevel()->GetSpillOperand();
+ DCHECK(!op->IsUnallocated());
+ return *op;
+ }
+ return TopLevel()->GetSpillRangeOperand();
}
UseInterval* LiveRange::FirstSearchIntervalForPosition(
LifetimePosition position) const {
if (current_interval_ == nullptr) return first_interval_;
- if (current_interval_->start().Value() > position.Value()) {
+ if (current_interval_->start() > position) {
current_interval_ = nullptr;
return first_interval_;
}
@@ -288,49 +439,66 @@
void LiveRange::AdvanceLastProcessedMarker(
UseInterval* to_start_of, LifetimePosition but_not_past) const {
if (to_start_of == nullptr) return;
- if (to_start_of->start().Value() > but_not_past.Value()) return;
- auto start = current_interval_ == nullptr ? LifetimePosition::Invalid()
- : current_interval_->start();
- if (to_start_of->start().Value() > start.Value()) {
+ if (to_start_of->start() > but_not_past) return;
+ LifetimePosition start = current_interval_ == nullptr
+ ? LifetimePosition::Invalid()
+ : current_interval_->start();
+ if (to_start_of->start() > start) {
current_interval_ = to_start_of;
}
}
-void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
- Zone* zone) {
- DCHECK(Start().Value() < position.Value());
+LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
+ int new_id = TopLevel()->GetNextChildId();
+ LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
+ DetachAt(position, child, zone);
+
+ child->top_level_ = TopLevel();
+ child->next_ = next_;
+ next_ = child;
+ return child;
+}
+
+
+UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
+ Zone* zone) {
+ DCHECK(Start() < position);
+ DCHECK(End() > position);
DCHECK(result->IsEmpty());
// Find the last interval that ends before the position. If the
// position is contained in one of the intervals in the chain, we
// split that interval and use the first part.
- auto current = FirstSearchIntervalForPosition(position);
+ UseInterval* current = FirstSearchIntervalForPosition(position);
// If the split position coincides with the beginning of a use interval
  // we need to split use positions in a special way.
bool split_at_start = false;
- if (current->start().Value() == position.Value()) {
+ if (current->start() == position) {
// When splitting at start we need to locate the previous use interval.
current = first_interval_;
}
+ UseInterval* after = nullptr;
while (current != nullptr) {
if (current->Contains(position)) {
- current->SplitAt(position, zone);
+ after = current->SplitAt(position, zone);
break;
}
- auto next = current->next();
- if (next->start().Value() >= position.Value()) {
- split_at_start = (next->start().Value() == position.Value());
+ UseInterval* next = current->next();
+ if (next->start() >= position) {
+ split_at_start = (next->start() == position);
+ after = next;
+ current->set_next(nullptr);
break;
}
current = next;
}
+ DCHECK(nullptr != after);
// Partition original use intervals to the two live ranges.
- auto before = current;
- auto after = before->next();
+ UseInterval* before = current;
result->last_interval_ =
(last_interval_ == before)
? after // Only interval in the range after split.
@@ -340,20 +508,21 @@
// Find the last use position before the split and the first use
// position after it.
- auto use_after = first_pos_;
+ UsePosition* use_after =
+ splitting_pointer_ == nullptr || splitting_pointer_->pos() > position
+ ? first_pos()
+ : splitting_pointer_;
UsePosition* use_before = nullptr;
if (split_at_start) {
// The split position coincides with the beginning of a use interval (the
// end of a lifetime hole). Use at this position should be attributed to
// the split child because split child owns use interval covering it.
- while (use_after != nullptr &&
- use_after->pos().Value() < position.Value()) {
+ while (use_after != nullptr && use_after->pos() < position) {
use_before = use_after;
use_after = use_after->next();
}
} else {
- while (use_after != nullptr &&
- use_after->pos().Value() <= position.Value()) {
+ while (use_after != nullptr && use_after->pos() <= position) {
use_before = use_after;
use_after = use_after->next();
}
@@ -361,7 +530,7 @@
// Partition original use positions to the two live ranges.
if (use_before != nullptr) {
- use_before->next_ = nullptr;
+ use_before->set_next(nullptr);
} else {
first_pos_ = nullptr;
}
@@ -372,17 +541,44 @@
last_processed_use_ = nullptr;
current_interval_ = nullptr;
- // Link the new live range in the chain before any of the other
- // ranges linked from the range before the split.
- result->parent_ = (parent_ == nullptr) ? this : parent_;
- result->kind_ = result->parent_->kind_;
- result->next_ = next_;
- next_ = result;
-
+ // Invalidate size and weight of this range. The child range has them
+ // invalid at construction.
+ size_ = kInvalidSize;
+ weight_ = kInvalidWeight;
#ifdef DEBUG
- Verify();
- result->Verify();
+ VerifyChildStructure();
+ result->VerifyChildStructure();
#endif
+ return use_before;
+}
+
+
+void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
+ LiveRange* child = this;
+ for (; child != nullptr; child = child->next()) {
+ child->top_level_ = new_top_level;
+ }
+}
+
+
+void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
+ const InstructionOperand& spill_op) {
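+  // Rewrite every use to the assigned operand, except uses that explicitly
+  // require a stack slot, which get the spill operand instead.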
+ for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
+ DCHECK(Start() <= pos->pos() && pos->pos() <= End());
+ if (!pos->HasOperand()) continue;
+ switch (pos->type()) {
+ case UsePositionType::kRequiresSlot:
+ DCHECK(spill_op.IsStackSlot() || spill_op.IsDoubleStackSlot());
+ InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
+ break;
+ case UsePositionType::kRequiresRegister:
+ DCHECK(op.IsRegister() || op.IsDoubleRegister());
+ // Fall through.
+ case UsePositionType::kAny:
+ InstructionOperand::ReplaceWith(pos->operand(), &op);
+ break;
+ }
+ }
}
@@ -394,156 +590,68 @@
bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
LifetimePosition start = Start();
LifetimePosition other_start = other->Start();
- if (start.Value() == other_start.Value()) {
+ if (start == other_start) {
UsePosition* pos = first_pos();
if (pos == nullptr) return false;
UsePosition* other_pos = other->first_pos();
if (other_pos == nullptr) return true;
- return pos->pos().Value() < other_pos->pos().Value();
+ return pos->pos() < other_pos->pos();
}
- return start.Value() < other_start.Value();
+ return start < other_start;
}
-void LiveRange::ShortenTo(LifetimePosition start) {
- TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
- DCHECK(first_interval_ != nullptr);
- DCHECK(first_interval_->start().Value() <= start.Value());
- DCHECK(start.Value() < first_interval_->end().Value());
- first_interval_->set_start(start);
-}
-
-
-void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
- Zone* zone) {
- TraceAlloc("Ensure live range %d in interval [%d %d[\n", id_, start.Value(),
- end.Value());
- auto new_end = end;
- while (first_interval_ != nullptr &&
- first_interval_->start().Value() <= end.Value()) {
- if (first_interval_->end().Value() > end.Value()) {
- new_end = first_interval_->end();
+void LiveRange::SetUseHints(int register_index) {
+ for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
+ if (!pos->HasOperand()) continue;
+ switch (pos->type()) {
+ case UsePositionType::kRequiresSlot:
+ break;
+ case UsePositionType::kRequiresRegister:
+ case UsePositionType::kAny:
+ pos->set_assigned_register(register_index);
+ break;
}
- first_interval_ = first_interval_->next();
- }
-
- auto new_interval = new (zone) UseInterval(start, new_end);
- new_interval->next_ = first_interval_;
- first_interval_ = new_interval;
- if (new_interval->next() == nullptr) {
- last_interval_ = new_interval;
- }
-}
-
-
-void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
- Zone* zone) {
- TraceAlloc("Add to live range %d interval [%d %d[\n", id_, start.Value(),
- end.Value());
- if (first_interval_ == nullptr) {
- auto interval = new (zone) UseInterval(start, end);
- first_interval_ = interval;
- last_interval_ = interval;
- } else {
- if (end.Value() == first_interval_->start().Value()) {
- first_interval_->set_start(start);
- } else if (end.Value() < first_interval_->start().Value()) {
- auto interval = new (zone) UseInterval(start, end);
- interval->set_next(first_interval_);
- first_interval_ = interval;
- } else {
- // Order of instruction's processing (see ProcessInstructions) guarantees
- // that each new use interval either precedes or intersects with
- // last added interval.
- DCHECK(start.Value() < first_interval_->end().Value());
- first_interval_->start_ = Min(start, first_interval_->start_);
- first_interval_->end_ = Max(end, first_interval_->end_);
- }
- }
-}
-
-
-void LiveRange::AddUsePosition(LifetimePosition pos,
- InstructionOperand* operand,
- InstructionOperand* hint, Zone* zone) {
- TraceAlloc("Add to live range %d use position %d\n", id_, pos.Value());
- auto use_pos = new (zone) UsePosition(pos, operand, hint);
- UsePosition* prev_hint = nullptr;
- UsePosition* prev = nullptr;
- auto current = first_pos_;
- while (current != nullptr && current->pos().Value() < pos.Value()) {
- prev_hint = current->HasHint() ? current : prev_hint;
- prev = current;
- current = current->next();
- }
-
- if (prev == nullptr) {
- use_pos->set_next(first_pos_);
- first_pos_ = use_pos;
- } else {
- use_pos->next_ = prev->next_;
- prev->next_ = use_pos;
- }
-
- if (prev_hint == nullptr && use_pos->HasHint()) {
- current_hint_operand_ = hint;
- }
-}
-
-
-void LiveRange::ConvertUsesToOperand(InstructionOperand* op) {
- auto use_pos = first_pos();
- while (use_pos != nullptr) {
- DCHECK(Start().Value() <= use_pos->pos().Value() &&
- use_pos->pos().Value() <= End().Value());
-
- if (use_pos->HasOperand()) {
- DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
- !use_pos->RequiresRegister());
- use_pos->operand()->ConvertTo(op->kind(), op->index());
- }
- use_pos = use_pos->next();
}
}
bool LiveRange::CanCover(LifetimePosition position) const {
if (IsEmpty()) return false;
- return Start().Value() <= position.Value() &&
- position.Value() < End().Value();
+ return Start() <= position && position < End();
}
-bool LiveRange::Covers(LifetimePosition position) {
+bool LiveRange::Covers(LifetimePosition position) const {
if (!CanCover(position)) return false;
- auto start_search = FirstSearchIntervalForPosition(position);
- for (auto interval = start_search; interval != nullptr;
+ UseInterval* start_search = FirstSearchIntervalForPosition(position);
+ for (UseInterval* interval = start_search; interval != nullptr;
interval = interval->next()) {
DCHECK(interval->next() == nullptr ||
- interval->next()->start().Value() >= interval->start().Value());
+ interval->next()->start() >= interval->start());
AdvanceLastProcessedMarker(interval, position);
if (interval->Contains(position)) return true;
- if (interval->start().Value() > position.Value()) return false;
+ if (interval->start() > position) return false;
}
return false;
}
-LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
- auto b = other->first_interval();
+LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
+ UseInterval* b = other->first_interval();
if (b == nullptr) return LifetimePosition::Invalid();
- auto advance_last_processed_up_to = b->start();
- auto a = FirstSearchIntervalForPosition(b->start());
+ LifetimePosition advance_last_processed_up_to = b->start();
+ UseInterval* a = FirstSearchIntervalForPosition(b->start());
while (a != nullptr && b != nullptr) {
- if (a->start().Value() > other->End().Value()) break;
- if (b->start().Value() > End().Value()) break;
- auto cur_intersection = a->Intersect(b);
+ if (a->start() > other->End()) break;
+ if (b->start() > End()) break;
+ LifetimePosition cur_intersection = a->Intersect(b);
if (cur_intersection.IsValid()) {
return cur_intersection;
}
- if (a->start().Value() < b->start().Value()) {
+ if (a->start() < b->start()) {
a = a->next();
- if (a == nullptr || a->start().Value() > other->End().Value()) break;
+ if (a == nullptr || a->start() > other->End()) break;
AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
} else {
b = b->next();
@@ -553,250 +661,433 @@
}
-RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
- Zone* zone, Frame* frame,
- InstructionSequence* code,
- const char* debug_name)
- : local_zone_(zone),
- frame_(frame),
- code_(code),
- debug_name_(debug_name),
- config_(config),
- phi_map_(PhiMap::key_compare(), PhiMap::allocator_type(local_zone())),
- live_in_sets_(code->InstructionBlockCount(), nullptr, local_zone()),
- live_ranges_(code->VirtualRegisterCount() * 2, nullptr, local_zone()),
- fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
- local_zone()),
- fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
- local_zone()),
- unhandled_live_ranges_(local_zone()),
- active_live_ranges_(local_zone()),
- inactive_live_ranges_(local_zone()),
- reusable_slots_(local_zone()),
- spill_ranges_(local_zone()),
- mode_(UNALLOCATED_REGISTERS),
- num_registers_(-1),
- allocation_ok_(true) {
- DCHECK(this->config()->num_general_registers() <=
- RegisterConfiguration::kMaxGeneralRegisters);
- DCHECK(this->config()->num_double_registers() <=
- RegisterConfiguration::kMaxDoubleRegisters);
- // TryAllocateFreeReg and AllocateBlockedReg assume this
- // when allocating local arrays.
- DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
- this->config()->num_general_registers());
- unhandled_live_ranges().reserve(
- static_cast<size_t>(code->VirtualRegisterCount() * 2));
- active_live_ranges().reserve(8);
- inactive_live_ranges().reserve(8);
- reusable_slots().reserve(8);
- spill_ranges().reserve(8);
- assigned_registers_ =
- new (code_zone()) BitVector(config->num_general_registers(), code_zone());
- assigned_double_registers_ = new (code_zone())
- BitVector(config->num_aliased_double_registers(), code_zone());
- frame->SetAllocatedRegisters(assigned_registers_);
- frame->SetAllocatedDoubleRegisters(assigned_double_registers_);
-}
-
-
-BitVector* RegisterAllocator::ComputeLiveOut(const InstructionBlock* block) {
- // Compute live out for the given block, except not including backward
- // successor edges.
- auto live_out = new (local_zone())
- BitVector(code()->VirtualRegisterCount(), local_zone());
-
- // Process all successor blocks.
- for (auto succ : block->successors()) {
- // Add values live on entry to the successor. Note the successor's
- // live_in will not be computed yet for backwards edges.
- auto live_in = live_in_sets_[succ.ToSize()];
- if (live_in != nullptr) live_out->Union(*live_in);
-
- // All phi input operands corresponding to this successor edge are live
- // out from this block.
- auto successor = code()->InstructionBlockAt(succ);
- size_t index = successor->PredecessorIndexOf(block->rpo_number());
- DCHECK(index < successor->PredecessorCount());
- for (auto phi : successor->phis()) {
- live_out->Add(phi->operands()[index]);
+unsigned LiveRange::GetSize() {
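+  // The size is the sum of all interval lengths; compute it lazily and cache
+  // it until a split invalidates it.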
+ if (size_ == kInvalidSize) {
+ size_ = 0;
+ for (const UseInterval* interval = first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ size_ += (interval->end().value() - interval->start().value());
}
}
- return live_out;
+
+ return static_cast<unsigned>(size_);
}
-void RegisterAllocator::AddInitialIntervals(const InstructionBlock* block,
- BitVector* live_out) {
- // Add an interval that includes the entire block to the live range for
- // each live_out value.
- auto start =
- LifetimePosition::FromInstructionIndex(block->first_instruction_index());
- auto end = LifetimePosition::FromInstructionIndex(
- block->last_instruction_index()).NextInstruction();
- BitVector::Iterator iterator(live_out);
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
- auto range = LiveRangeFor(operand_index);
- range->AddUseInterval(start, end, local_zone());
- iterator.Advance();
+void LiveRange::Print(const RegisterConfiguration* config,
+ bool with_children) const {
+ OFStream os(stdout);
+ PrintableLiveRange wrapper;
+ wrapper.register_configuration_ = config;
+ for (const LiveRange* i = this; i != nullptr; i = i->next()) {
+ wrapper.range_ = i;
+ os << wrapper << std::endl;
+ if (!with_children) break;
}
}
-int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - config()->num_general_registers();
+void LiveRange::Print(bool with_children) const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config, with_children);
}
-InstructionOperand* RegisterAllocator::AllocateFixed(
- UnallocatedOperand* operand, int pos, bool is_tagged) {
- TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
- DCHECK(operand->HasFixedPolicy());
- if (operand->HasFixedSlotPolicy()) {
- operand->ConvertTo(InstructionOperand::STACK_SLOT,
- operand->fixed_slot_index());
- } else if (operand->HasFixedRegisterPolicy()) {
- int reg_index = operand->fixed_register_index();
- operand->ConvertTo(InstructionOperand::REGISTER, reg_index);
- } else if (operand->HasFixedDoubleRegisterPolicy()) {
- int reg_index = operand->fixed_register_index();
- operand->ConvertTo(InstructionOperand::DOUBLE_REGISTER, reg_index);
- } else {
- UNREACHABLE();
- }
- if (is_tagged) {
- TraceAlloc("Fixed reg is tagged at %d\n", pos);
- auto instr = InstructionAt(pos);
- if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(operand, code_zone());
+struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
+ SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
+ SpillMoveInsertionList* next)
+ : gap_index(gap_index), operand(operand), next(next) {}
+ const int gap_index;
+ InstructionOperand* const operand;
+ SpillMoveInsertionList* const next;
+};
+
+
+TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
+ : LiveRange(0, rep, this),
+ vreg_(vreg),
+ last_child_id_(0),
+ splintered_from_(nullptr),
+ spill_operand_(nullptr),
+ spill_move_insertion_locations_(nullptr),
+ spilled_in_deferred_blocks_(false),
+ spill_start_index_(kMaxInt),
+ last_pos_(nullptr),
+ splinter_(nullptr),
+ has_preassigned_slot_(false) {
+ bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
+}
+
+
+#if DEBUG
+int TopLevelLiveRange::debug_virt_reg() const {
+ return IsSplinter() ? splintered_from()->vreg() : vreg();
+}
+#endif
+
+
+void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
+ InstructionOperand* operand) {
+ DCHECK(HasNoSpillType());
+ spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList(
+ gap_index, operand, spill_move_insertion_locations_);
+}
+
+
+bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
+ InstructionSequence* code, const InstructionOperand& spill_operand) {
+ if (!IsSpilledOnlyInDeferredBlocks()) return false;
+
+ TRACE("Live Range %d will be spilled only in deferred blocks.\n", vreg());
+ // If we have ranges that aren't spilled but require the operand on the stack,
+ // make sure we insert the spill.
+ for (const LiveRange* child = this; child != nullptr; child = child->next()) {
+ if (!child->spilled() &&
+ child->NextSlotPosition(child->Start()) != nullptr) {
+ Instruction* instr =
+ code->InstructionAt(child->Start().ToInstructionIndex());
+ // Insert spill at the end to let live range connections happen at START.
+ ParallelMove* move =
+ instr->GetOrCreateParallelMove(Instruction::END, code->zone());
+ InstructionOperand assigned = child->GetAssignedOperand();
+ if (TopLevel()->has_slot_use()) {
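+        // A constraint move to the slot may already exist; scan the parallel
+        // move so we do not insert a duplicate spill move.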
+ bool found = false;
+ for (MoveOperands* move_op : *move) {
+ if (move_op->IsEliminated()) continue;
+ if (move_op->source().Equals(assigned) &&
+ move_op->destination().Equals(spill_operand)) {
+ found = true;
+ break;
+ }
+ }
+ if (found) continue;
+ }
+
+ move->AddMove(assigned, spill_operand);
}
}
- return operand;
+
+ return true;
}
-LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
- DCHECK(index < config()->num_general_registers());
- auto result = fixed_live_ranges()[index];
- if (result == nullptr) {
- // TODO(titzer): add a utility method to allocate a new LiveRange:
- // The LiveRange object itself can go in this zone, but the
- // InstructionOperand needs
- // to go in the code zone, since it may survive register allocation.
- result = new (local_zone()) LiveRange(FixedLiveRangeID(index), code_zone());
- DCHECK(result->IsFixed());
- result->kind_ = GENERAL_REGISTERS;
- SetLiveRangeAssignedRegister(result, index);
- fixed_live_ranges()[index] = result;
+void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
+ const InstructionOperand& op,
+ bool might_be_duplicated) {
+ DCHECK_IMPLIES(op.IsConstant(), spill_move_insertion_locations() == nullptr);
+ Zone* zone = sequence->zone();
+
+ for (SpillMoveInsertionList* to_spill = spill_move_insertion_locations();
+ to_spill != nullptr; to_spill = to_spill->next) {
+ Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
+ ParallelMove* move =
+ instr->GetOrCreateParallelMove(Instruction::START, zone);
+ // Skip insertion if it's possible that the move exists already as a
+ // constraint move from a fixed output register to a slot.
+ if (might_be_duplicated || has_preassigned_slot()) {
+ bool found = false;
+ for (MoveOperands* move_op : *move) {
+ if (move_op->IsEliminated()) continue;
+ if (move_op->source().Equals(*to_spill->operand) &&
+ move_op->destination().Equals(op)) {
+ found = true;
+ if (has_preassigned_slot()) move_op->Eliminate();
+ break;
+ }
+ }
+ if (found) continue;
+ }
+ if (!has_preassigned_slot()) {
+ move->AddMove(*to_spill->operand, op);
+ }
}
- return result;
}
-LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
- DCHECK(index < config()->num_aliased_double_registers());
- auto result = fixed_double_live_ranges()[index];
- if (result == nullptr) {
- result = new (local_zone())
- LiveRange(FixedDoubleLiveRangeID(index), code_zone());
- DCHECK(result->IsFixed());
- result->kind_ = DOUBLE_REGISTERS;
- SetLiveRangeAssignedRegister(result, index);
- fixed_double_live_ranges()[index] = result;
- }
- return result;
+void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
+ DCHECK(HasNoSpillType());
+ DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
+ set_spill_type(SpillType::kSpillOperand);
+ spill_operand_ = operand;
}
-LiveRange* RegisterAllocator::LiveRangeFor(int index) {
- if (index >= static_cast<int>(live_ranges().size())) {
- live_ranges().resize(index + 1, nullptr);
- }
- auto result = live_ranges()[index];
- if (result == nullptr) {
- result = new (local_zone()) LiveRange(index, code_zone());
- live_ranges()[index] = result;
- }
- return result;
+void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
+ DCHECK(!HasSpillOperand());
+ DCHECK(spill_range);
+ spill_range_ = spill_range;
}
-GapInstruction* RegisterAllocator::GetLastGap(const InstructionBlock* block) {
- int last_instruction = block->last_instruction_index();
- return code()->GapAt(last_instruction - 1);
+AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
+ SpillRange* spill_range = GetSpillRange();
+ int index = spill_range->assigned_slot();
+ return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
}
-LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
- if (operand->IsUnallocated()) {
- return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register());
- } else if (operand->IsRegister()) {
- return FixedLiveRangeFor(operand->index());
- } else if (operand->IsDoubleRegister()) {
- return FixedDoubleLiveRangeFor(operand->index());
+void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
+ Zone* zone) {
+ DCHECK(start != Start() || end != End());
+ DCHECK(start < end);
+
+ TopLevelLiveRange splinter_temp(-1, representation());
+ UsePosition* last_in_splinter = nullptr;
+ // Live ranges defined in deferred blocks stay in deferred blocks, so we
+ // don't need to splinter them. That means that start should always be
+ // after the beginning of the range.
+ DCHECK(start > Start());
+
+ if (end >= End()) {
+ DCHECK(start > Start());
+ DetachAt(start, &splinter_temp, zone);
+ next_ = nullptr;
} else {
- return nullptr;
+ DCHECK(start < End() && Start() < end);
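+    // Carve [start, end) out of the middle: detach the tail at start, then
+    // detach again at end and stitch the final part back onto this range.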
+
+ const int kInvalidId = std::numeric_limits<int>::max();
+
+ UsePosition* last = DetachAt(start, &splinter_temp, zone);
+
+ LiveRange end_part(kInvalidId, this->representation(), nullptr);
+ last_in_splinter = splinter_temp.DetachAt(end, &end_part, zone);
+
+ next_ = end_part.next_;
+ last_interval_->set_next(end_part.first_interval_);
+ // The next splinter will happen either at or after the current interval.
+ // We can optimize DetachAt by setting current_interval_ accordingly,
+ // which will then be picked up by FirstSearchIntervalForPosition.
+ current_interval_ = last_interval_;
+ last_interval_ = end_part.last_interval_;
+
+ if (first_pos_ == nullptr) {
+ first_pos_ = end_part.first_pos_;
+ } else {
+ splitting_pointer_ = last;
+ if (last != nullptr) last->set_next(end_part.first_pos_);
+ }
}
-}
-
-void RegisterAllocator::Define(LifetimePosition position,
- InstructionOperand* operand,
- InstructionOperand* hint) {
- auto range = LiveRangeFor(operand);
- if (range == nullptr) return;
-
- if (range->IsEmpty() || range->Start().Value() > position.Value()) {
- // Can happen if there is a definition without use.
- range->AddUseInterval(position, position.NextInstruction(), local_zone());
- range->AddUsePosition(position.NextInstruction(), nullptr, nullptr,
- local_zone());
+ if (splinter()->IsEmpty()) {
+ splinter()->first_interval_ = splinter_temp.first_interval_;
+ splinter()->last_interval_ = splinter_temp.last_interval_;
} else {
- range->ShortenTo(position);
+ splinter()->last_interval_->set_next(splinter_temp.first_interval_);
+ splinter()->last_interval_ = splinter_temp.last_interval_;
}
+ if (splinter()->first_pos() == nullptr) {
+ splinter()->first_pos_ = splinter_temp.first_pos_;
+ } else {
+ splinter()->last_pos_->set_next(splinter_temp.first_pos_);
+ }
+ if (last_in_splinter != nullptr) {
+ splinter()->last_pos_ = last_in_splinter;
+ } else {
+ if (splinter()->first_pos() != nullptr &&
+ splinter()->last_pos_ == nullptr) {
+ splinter()->last_pos_ = splinter()->first_pos();
+ for (UsePosition* pos = splinter()->first_pos(); pos != nullptr;
+ pos = pos->next()) {
+ splinter()->last_pos_ = pos;
+ }
+ }
+ }
+#if DEBUG
+ Verify();
+ splinter()->Verify();
+#endif
+}
- if (operand->IsUnallocated()) {
- auto unalloc_operand = UnallocatedOperand::cast(operand);
- range->AddUsePosition(position, unalloc_operand, hint, local_zone());
+
+void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
+ splintered_from_ = splinter_parent;
+ if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
+ SetSpillRange(splinter_parent->spill_range_);
}
}
-void RegisterAllocator::Use(LifetimePosition block_start,
- LifetimePosition position,
- InstructionOperand* operand,
- InstructionOperand* hint) {
- auto range = LiveRangeFor(operand);
- if (range == nullptr) return;
- if (operand->IsUnallocated()) {
- UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
- range->AddUsePosition(position, unalloc_operand, hint, local_zone());
+void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
+ DCHECK(merged->TopLevel() == this);
+
+ if (HasNoSpillType() && merged->HasSpillRange()) {
+ set_spill_type(merged->spill_type());
+ DCHECK(GetSpillRange()->live_ranges().size() > 0);
+ merged->spill_range_ = nullptr;
+ merged->bits_ =
+ SpillTypeField::update(merged->bits_, SpillType::kNoSpillType);
}
- range->AddUseInterval(block_start, position, local_zone());
}
-void RegisterAllocator::AddGapMove(int index,
- GapInstruction::InnerPosition position,
- InstructionOperand* from,
- InstructionOperand* to) {
- auto gap = code()->GapAt(index);
- auto move = gap->GetOrCreateParallelMove(position, code_zone());
- move->AddMove(from, to, code_zone());
+void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
+ DCHECK(Start() < other->Start());
+ DCHECK(other->splintered_from() == this);
+
+ LiveRange* first = this;
+ LiveRange* second = other;
+ DCHECK(first->Start() < second->Start());
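+  // Zipper-merge the two sorted chains of children, splitting ranges where
+  // they overlap so the combined chain stays sorted and disjoint.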
+ while (first != nullptr && second != nullptr) {
+ DCHECK(first != second);
+ // Make sure the ranges are in order each time we iterate.
+ if (second->Start() < first->Start()) {
+ LiveRange* tmp = second;
+ second = first;
+ first = tmp;
+ continue;
+ }
+
+ if (first->End() <= second->Start()) {
+ if (first->next() == nullptr ||
+ first->next()->Start() > second->Start()) {
+ // First is in order before second.
+ LiveRange* temp = first->next();
+ first->next_ = second;
+ first = temp;
+ } else {
+ // First is in order before its successor (or second), so advance first.
+ first = first->next();
+ }
+ continue;
+ }
+
+ DCHECK(first->Start() < second->Start());
+ // If first and second intersect, split first.
+ if (first->Start() < second->End() && second->Start() < first->End()) {
+ LiveRange* temp = first->SplitAt(second->Start(), zone);
+ CHECK(temp != first);
+ temp->set_spilled(first->spilled());
+ if (!temp->spilled())
+ temp->set_assigned_register(first->assigned_register());
+
+ first->next_ = second;
+ first = temp;
+ continue;
+ }
+ DCHECK(first->End() <= second->Start());
+ }
+
+ TopLevel()->UpdateParentForAllChildren(TopLevel());
+ TopLevel()->UpdateSpillRangePostMerge(other);
+
+#if DEBUG
+ Verify();
+#endif
+}
+
+
+void TopLevelLiveRange::VerifyChildrenInOrder() const {
+ LifetimePosition last_end = End();
+ for (const LiveRange* child = this->next(); child != nullptr;
+ child = child->next()) {
+ DCHECK(last_end <= child->Start());
+ last_end = child->End();
+ }
+}
+
+
+void TopLevelLiveRange::Verify() const {
+ VerifyChildrenInOrder();
+ for (const LiveRange* child = this; child != nullptr; child = child->next()) {
+ VerifyChildStructure();
+ }
+}
+
+
+void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
+ TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
+ DCHECK(first_interval_ != nullptr);
+ DCHECK(first_interval_->start() <= start);
+ DCHECK(start < first_interval_->end());
+ first_interval_->set_start(start);
+}
+
+
+void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
+ LifetimePosition end, Zone* zone) {
+ TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
+ end.value());
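+  // Remove leading intervals that the new interval overlaps, extending the
+  // new end so the replacement still covers everything that was dropped.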
+ LifetimePosition new_end = end;
+ while (first_interval_ != nullptr && first_interval_->start() <= end) {
+ if (first_interval_->end() > end) {
+ new_end = first_interval_->end();
+ }
+ first_interval_ = first_interval_->next();
+ }
+
+ UseInterval* new_interval = new (zone) UseInterval(start, new_end);
+ new_interval->set_next(first_interval_);
+ first_interval_ = new_interval;
+ if (new_interval->next() == nullptr) {
+ last_interval_ = new_interval;
+ }
+}
+
+
+void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
+ LifetimePosition end, Zone* zone) {
+ TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
+ end.value());
+ if (first_interval_ == nullptr) {
+ UseInterval* interval = new (zone) UseInterval(start, end);
+ first_interval_ = interval;
+ last_interval_ = interval;
+ } else {
+ if (end == first_interval_->start()) {
+ first_interval_->set_start(start);
+ } else if (end < first_interval_->start()) {
+ UseInterval* interval = new (zone) UseInterval(start, end);
+ interval->set_next(first_interval_);
+ first_interval_ = interval;
+ } else {
+      // The order of instruction processing (see ProcessInstructions)
+      // guarantees that each new use interval either precedes or intersects
+      // with the last added interval.
+ DCHECK(start < first_interval_->end());
+ first_interval_->set_start(Min(start, first_interval_->start()));
+ first_interval_->set_end(Max(end, first_interval_->end()));
+ }
+ }
+}
+
+
+void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
+ LifetimePosition pos = use_pos->pos();
+ TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
+ UsePosition* prev_hint = nullptr;
+ UsePosition* prev = nullptr;
+ UsePosition* current = first_pos_;
+ while (current != nullptr && current->pos() < pos) {
+ prev_hint = current->HasHint() ? current : prev_hint;
+ prev = current;
+ current = current->next();
+ }
+
+ if (prev == nullptr) {
+ use_pos->set_next(first_pos_);
+ first_pos_ = use_pos;
+ } else {
+ use_pos->set_next(prev->next());
+ prev->set_next(use_pos);
+ }
+
+ if (prev_hint == nullptr && use_pos->HasHint()) {
+ current_hint_position_ = use_pos;
+ }
}
static bool AreUseIntervalsIntersecting(UseInterval* interval1,
UseInterval* interval2) {
while (interval1 != nullptr && interval2 != nullptr) {
- if (interval1->start().Value() < interval2->start().Value()) {
- if (interval1->end().Value() > interval2->start().Value()) {
+ if (interval1->start() < interval2->start()) {
+ if (interval1->end() > interval2->start()) {
return true;
}
interval1 = interval1->next();
} else {
- if (interval2->end().Value() > interval1->start().Value()) {
+ if (interval2->end() > interval1->start()) {
return true;
}
interval2 = interval2->next();
@@ -806,33 +1097,79 @@
}
-SpillRange::SpillRange(LiveRange* range, Zone* zone) : live_ranges_(zone) {
- auto src = range->first_interval();
+std::ostream& operator<<(std::ostream& os,
+ const PrintableLiveRange& printable_range) {
+ const LiveRange* range = printable_range.range_;
+ os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
+ << " ";
+ if (range->TopLevel()->is_phi()) os << "phi ";
+ if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
+
+ os << "{" << std::endl;
+ UseInterval* interval = range->first_interval();
+ UsePosition* use_pos = range->first_pos();
+ PrintableInstructionOperand pio;
+ pio.register_configuration_ = printable_range.register_configuration_;
+ while (use_pos != nullptr) {
+ if (use_pos->HasOperand()) {
+ pio.op_ = *use_pos->operand();
+ os << pio << use_pos->pos() << " ";
+ }
+ use_pos = use_pos->next();
+ }
+ os << std::endl;
+
+ while (interval != nullptr) {
+ os << '[' << interval->start() << ", " << interval->end() << ')'
+ << std::endl;
+ interval = interval->next();
+ }
+ os << "}";
+ return os;
+}
+
+
+SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
+ : live_ranges_(zone),
+ assigned_slot_(kUnassignedSlot),
+ byte_width_(GetByteWidth(parent->representation())),
+ kind_(parent->kind()) {
+ // Spill ranges are created for top level, non-splintered ranges. This is so
+ // that, when merging decisions are made, we consider the full extent of the
+ // virtual register, and avoid clobbering it.
+ DCHECK(!parent->IsSplinter());
UseInterval* result = nullptr;
UseInterval* node = nullptr;
- // Copy the nodes
- while (src != nullptr) {
- auto new_node = new (zone) UseInterval(src->start(), src->end());
- if (result == nullptr) {
- result = new_node;
- } else {
- node->set_next(new_node);
+ // Copy the intervals for all ranges.
+ for (LiveRange* range = parent; range != nullptr; range = range->next()) {
+ UseInterval* src = range->first_interval();
+ while (src != nullptr) {
+ UseInterval* new_node = new (zone) UseInterval(src->start(), src->end());
+ if (result == nullptr) {
+ result = new_node;
+ } else {
+ node->set_next(new_node);
+ }
+ node = new_node;
+ src = src->next();
}
- node = new_node;
- src = src->next();
}
use_interval_ = result;
- live_ranges().push_back(range);
+ live_ranges().push_back(parent);
end_position_ = node->end();
- DCHECK(!range->HasSpillRange());
- range->SetSpillRange(this);
+ parent->SetSpillRange(this);
+}
+
+
+int SpillRange::ByteWidth() const {
+ return GetByteWidth(live_ranges_[0]->representation());
}
bool SpillRange::IsIntersectingWith(SpillRange* other) const {
if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
- this->End().Value() <= other->use_interval_->start().Value() ||
- other->End().Value() <= this->use_interval_->start().Value()) {
+ this->End() <= other->use_interval_->start() ||
+ other->End() <= this->use_interval_->start()) {
return false;
}
return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
@@ -840,11 +1177,15 @@
bool SpillRange::TryMerge(SpillRange* other) {
- if (Kind() != other->Kind() || IsIntersectingWith(other)) return false;
+ if (HasSlot() || other->HasSlot()) return false;
+  // TODO(dcarney): byte widths should be compared here, not kinds.
+ if (live_ranges_[0]->kind() != other->live_ranges_[0]->kind() ||
+ IsIntersectingWith(other)) {
+ return false;
+ }
- auto max = LifetimePosition::MaxPosition();
- if (End().Value() < other->End().Value() &&
- other->End().Value() != max.Value()) {
+ LifetimePosition max = LifetimePosition::MaxPosition();
+ if (End() < other->End() && other->End() != max) {
end_position_ = other->End();
}
other->end_position_ = max;
@@ -852,7 +1193,7 @@
MergeDisjointIntervals(other->use_interval_);
other->use_interval_ = nullptr;
- for (auto range : other->live_ranges()) {
+ for (TopLevelLiveRange* range : other->live_ranges()) {
DCHECK(range->GetSpillRange() == other);
range->SetSpillRange(this);
}
@@ -865,26 +1206,16 @@
}
-void SpillRange::SetOperand(InstructionOperand* op) {
- for (auto range : live_ranges()) {
- DCHECK(range->GetSpillRange() == this);
- range->CommitSpillOperand(op);
- }
-}
-
-
void SpillRange::MergeDisjointIntervals(UseInterval* other) {
UseInterval* tail = nullptr;
- auto current = use_interval_;
+ UseInterval* current = use_interval_;
while (other != nullptr) {
// Make sure the 'current' list starts first
- if (current == nullptr ||
- current->start().Value() > other->start().Value()) {
+ if (current == nullptr || current->start() > other->start()) {
std::swap(current, other);
}
// Check disjointness
- DCHECK(other == nullptr ||
- current->end().Value() <= other->start().Value());
+ DCHECK(other == nullptr || current->end() <= other->start());
// Append the 'current' node to the result accumulator and move forward
if (tail == nullptr) {
use_interval_ = current;
@@ -898,81 +1229,1617 @@
}
-void RegisterAllocator::ReuseSpillSlots() {
- DCHECK(FLAG_turbo_reuse_spill_slots);
+void SpillRange::Print() const {
+ OFStream os(stdout);
+ os << "{" << std::endl;
+ for (TopLevelLiveRange* range : live_ranges()) {
+ os << range->vreg() << " ";
+ }
+ os << std::endl;
- // Merge disjoint spill ranges
- for (size_t i = 0; i < spill_ranges().size(); i++) {
- auto range = spill_ranges()[i];
- if (range->IsEmpty()) continue;
- for (size_t j = i + 1; j < spill_ranges().size(); j++) {
- auto other = spill_ranges()[j];
- if (!other->IsEmpty()) {
- range->TryMerge(other);
+ for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
+ os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
+ }
+ os << "}" << std::endl;
+}
+
+
+RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
+ const InstructionBlock* block,
+ Zone* zone)
+ : phi_(phi),
+ block_(block),
+ incoming_operands_(zone),
+ assigned_register_(kUnassignedRegister) {
+ incoming_operands_.reserve(phi->operands().size());
+}
+
+
+void RegisterAllocationData::PhiMapValue::AddOperand(
+ InstructionOperand* operand) {
+ incoming_operands_.push_back(operand);
+}
+
+
+void RegisterAllocationData::PhiMapValue::CommitAssignment(
+ const InstructionOperand& assigned) {
+ for (InstructionOperand* operand : incoming_operands_) {
+ InstructionOperand::ReplaceWith(operand, &assigned);
+ }
+}
+
+
+RegisterAllocationData::RegisterAllocationData(
+ const RegisterConfiguration* config, Zone* zone, Frame* frame,
+ InstructionSequence* code, const char* debug_name)
+ : allocation_zone_(zone),
+ frame_(frame),
+ code_(code),
+ debug_name_(debug_name),
+ config_(config),
+ phi_map_(allocation_zone()),
+ allocatable_codes_(this->config()->num_general_registers(), -1,
+ allocation_zone()),
+ allocatable_double_codes_(this->config()->num_double_registers(), -1,
+ allocation_zone()),
+ live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
+ live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
+ live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
+ allocation_zone()),
+ fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
+ allocation_zone()),
+ fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
+ allocation_zone()),
+ spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
+ delayed_references_(allocation_zone()),
+ assigned_registers_(nullptr),
+ assigned_double_registers_(nullptr),
+ virtual_register_count_(code->VirtualRegisterCount()),
+ preassigned_slot_ranges_(zone) {
+ DCHECK(this->config()->num_general_registers() <=
+ RegisterConfiguration::kMaxGeneralRegisters);
+ DCHECK(this->config()->num_double_registers() <=
+ RegisterConfiguration::kMaxDoubleRegisters);
+ assigned_registers_ = new (code_zone())
+ BitVector(this->config()->num_general_registers(), code_zone());
+ assigned_double_registers_ = new (code_zone())
+ BitVector(this->config()->num_double_registers(), code_zone());
+ this->frame()->SetAllocatedRegisters(assigned_registers_);
+ this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
+}
+
+
+MoveOperands* RegisterAllocationData::AddGapMove(
+ int index, Instruction::GapPosition position,
+ const InstructionOperand& from, const InstructionOperand& to) {
+ Instruction* instr = code()->InstructionAt(index);
+ ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
+ return moves->AddMove(from, to);
+}
+
+
+MachineRepresentation RegisterAllocationData::RepresentationFor(
+ int virtual_register) {
+ DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
+ return code()->GetRepresentation(virtual_register);
+}
+
+
+TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
+ if (index >= static_cast<int>(live_ranges().size())) {
+ live_ranges().resize(index + 1, nullptr);
+ }
+ TopLevelLiveRange* result = live_ranges()[index];
+ if (result == nullptr) {
+ result = NewLiveRange(index, RepresentationFor(index));
+ live_ranges()[index] = result;
+ }
+ return result;
+}
+
+
+TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
+ int index, MachineRepresentation rep) {
+ return new (allocation_zone()) TopLevelLiveRange(index, rep);
+}
+
+
+int RegisterAllocationData::GetNextLiveRangeId() {
+ int vreg = virtual_register_count_++;
+ if (vreg >= static_cast<int>(live_ranges().size())) {
+ live_ranges().resize(vreg + 1, nullptr);
+ }
+ return vreg;
+}
+
+
+TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
+ MachineRepresentation rep) {
+ int vreg = GetNextLiveRangeId();
+ TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
+ return ret;
+}
+
+
+RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
+ const InstructionBlock* block, PhiInstruction* phi) {
+ RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone())
+ RegisterAllocationData::PhiMapValue(phi, block, allocation_zone());
+ auto res =
+ phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
+ DCHECK(res.second);
+ USE(res);
+ return map_value;
+}
+
+
+RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
+ int virtual_register) {
+ auto it = phi_map_.find(virtual_register);
+ DCHECK(it != phi_map_.end());
+ return it->second;
+}
+
+
+RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
+ TopLevelLiveRange* top_range) {
+ return GetPhiMapValueFor(top_range->vreg());
+}
+
+
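+// Descriptive comment: returns true (and prints diagnostics) if some virtual
+// register is live on entry to the first block, i.e. is used without ever
+// being defined.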
+bool RegisterAllocationData::ExistsUseWithoutDefinition() {
+ bool found = false;
+ BitVector::Iterator iterator(live_in_sets()[0]);
+ while (!iterator.Done()) {
+ found = true;
+ int operand_index = iterator.Current();
+ PrintF("Register allocator error: live v%d reached first block.\n",
+ operand_index);
+ LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
+ PrintF(" (first use is at %d)\n", range->first_pos()->pos().value());
+ if (debug_name() == nullptr) {
+ PrintF("\n");
+ } else {
+ PrintF(" (function: %s)\n", debug_name());
+ }
+ iterator.Advance();
+ }
+ return found;
+}
+
+
+// If a range is defined in a deferred block, we can expect the entire range
+// to cover only positions in deferred blocks. Otherwise, a block on the
+// hot path would be dominated by a deferred block, meaning it would be
+// unreachable without passing through the deferred block, a contradiction.
+// In particular, when such a range contributes a result back to the hot
+// path, it does so as one of the inputs of a phi. In that case, the value
+// is transferred via a move in the Gap::END of the last instruction of a
+// deferred block.
+bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
+ for (const TopLevelLiveRange* range : live_ranges()) {
+ if (range == nullptr || range->IsEmpty() ||
+ !code()
+ ->GetInstructionBlock(range->Start().ToInstructionIndex())
+ ->IsDeferred()) {
+ continue;
+ }
+ for (const UseInterval* i = range->first_interval(); i != nullptr;
+ i = i->next()) {
+ int first = i->FirstGapIndex();
+ int last = i->LastGapIndex();
+ for (int instr = first; instr <= last;) {
+ const InstructionBlock* block = code()->GetInstructionBlock(instr);
+ if (!block->IsDeferred()) return false;
+ instr = block->last_instruction_index() + 1;
}
}
}
-
- // Allocate slots for the merged spill ranges.
- for (auto range : spill_ranges()) {
- if (range->IsEmpty()) continue;
- // Allocate a new operand referring to the spill slot.
- auto kind = range->Kind();
- int index = frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
- auto op_kind = kind == DOUBLE_REGISTERS
- ? InstructionOperand::DOUBLE_STACK_SLOT
- : InstructionOperand::STACK_SLOT;
- auto op = new (code_zone()) InstructionOperand(op_kind, index);
- range->SetOperand(op);
- }
+ return true;
}
-void RegisterAllocator::CommitAssignment() {
- for (auto range : live_ranges()) {
- if (range == nullptr || range->IsEmpty()) continue;
- // Register assignments were committed in set_assigned_register.
- if (range->HasRegisterAssigned()) continue;
- auto assigned = range->CreateAssignedOperand(code_zone());
- range->ConvertUsesToOperand(assigned);
- if (range->IsSpilled()) {
- range->CommitSpillsAtDefinition(code(), assigned);
- }
+SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
+ TopLevelLiveRange* range) {
+ DCHECK(!range->HasSpillOperand());
+
+ SpillRange* spill_range = range->GetAllocatedSpillRange();
+ if (spill_range == nullptr) {
+ DCHECK(!range->IsSplinter());
+ spill_range = new (allocation_zone()) SpillRange(range, allocation_zone());
}
-}
+ range->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
+ int spill_range_index =
+ range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg();
-SpillRange* RegisterAllocator::AssignSpillRangeToLiveRange(LiveRange* range) {
- DCHECK(FLAG_turbo_reuse_spill_slots);
- auto spill_range = new (local_zone()) SpillRange(range, local_zone());
- spill_ranges().push_back(spill_range);
+ spill_ranges()[spill_range_index] = spill_range;
+
return spill_range;
}
-bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
- DCHECK(FLAG_turbo_reuse_spill_slots);
- if (range->IsChild() || !range->is_phi()) return false;
- DCHECK(range->HasNoSpillType());
+SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
+ TopLevelLiveRange* range) {
+ DCHECK(!range->HasSpillOperand());
+ DCHECK(!range->IsSplinter());
+ SpillRange* spill_range =
+ new (allocation_zone()) SpillRange(range, allocation_zone());
+ return spill_range;
+}
- auto lookup = phi_map_.find(range->id());
- DCHECK(lookup != phi_map_.end());
- auto phi = lookup->second.phi;
- auto block = lookup->second.block;
+
+void RegisterAllocationData::MarkAllocated(RegisterKind kind, int index) {
+ if (kind == DOUBLE_REGISTERS) {
+ assigned_double_registers_->Add(index);
+ } else {
+ DCHECK(kind == GENERAL_REGISTERS);
+ assigned_registers_->Add(index);
+ }
+}
+
+
+bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
+ return pos.IsFullStart() &&
+ code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
+ pos.ToInstructionIndex();
+}
+
+
+ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
+ : data_(data) {}
+
+
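+// Descriptive comment: replaces |operand|, which must have a fixed policy,
+// with the concrete register or stack slot it demands, and records a
+// reference in the reference map at |pos| if the value is tagged.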
+InstructionOperand* ConstraintBuilder::AllocateFixed(
+ UnallocatedOperand* operand, int pos, bool is_tagged) {
+ TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
+ DCHECK(operand->HasFixedPolicy());
+ InstructionOperand allocated;
+ MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
+ int virtual_register = operand->virtual_register();
+ if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
+ rep = data()->RepresentationFor(virtual_register);
+ }
+ if (operand->HasFixedSlotPolicy()) {
+ allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
+ operand->fixed_slot_index());
+ } else if (operand->HasFixedRegisterPolicy()) {
+ DCHECK(!IsFloatingPoint(rep));
+ allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
+ operand->fixed_register_index());
+ } else if (operand->HasFixedDoubleRegisterPolicy()) {
+ DCHECK(IsFloatingPoint(rep));
+ DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
+ allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
+ operand->fixed_register_index());
+ } else {
+ UNREACHABLE();
+ }
+ InstructionOperand::ReplaceWith(operand, &allocated);
+ if (is_tagged) {
+ TRACE("Fixed reg is tagged at %d\n", pos);
+ Instruction* instr = code()->InstructionAt(pos);
+ if (instr->HasReferenceMap()) {
+ instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
+ }
+ }
+ return operand;
+}
+
+
+void ConstraintBuilder::MeetRegisterConstraints() {
+ for (InstructionBlock* block : code()->instruction_blocks()) {
+ MeetRegisterConstraints(block);
+ }
+}
+
+
+void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
+ int start = block->first_instruction_index();
+ int end = block->last_instruction_index();
+ DCHECK_NE(-1, start);
+ for (int i = start; i <= end; ++i) {
+ MeetConstraintsBefore(i);
+ if (i != end) MeetConstraintsAfter(i);
+ }
+ // Meet register constraints for the last instruction in the block.
+ MeetRegisterConstraintsForLastInstructionInBlock(block);
+}
+
+
+void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
+ const InstructionBlock* block) {
+ int end = block->last_instruction_index();
+ Instruction* last_instruction = code()->InstructionAt(end);
+ for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
+ InstructionOperand* output_operand = last_instruction->OutputAt(i);
+ DCHECK(!output_operand->IsConstant());
+ UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
+ int output_vreg = output->virtual_register();
+ TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
+ bool assigned = false;
+ if (output->HasFixedPolicy()) {
+ AllocateFixed(output, -1, false);
+ // This value is produced on the stack; we never need to spill it.
+ if (output->IsStackSlot()) {
+ DCHECK(LocationOperand::cast(output)->index() <
+ data()->frame()->GetSpillSlotCount());
+ range->SetSpillOperand(LocationOperand::cast(output));
+ range->SetSpillStartIndex(end);
+ assigned = true;
+ }
+
+ for (const RpoNumber& succ : block->successors()) {
+ const InstructionBlock* successor = code()->InstructionBlockAt(succ);
+ DCHECK(successor->PredecessorCount() == 1);
+ int gap_index = successor->first_instruction_index();
+ // Create an unconstrained operand for the same virtual register
+ // and insert a gap move from the fixed output to the operand.
+ UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
+ data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
+ }
+ }
+
+ if (!assigned) {
+ for (const RpoNumber& succ : block->successors()) {
+ const InstructionBlock* successor = code()->InstructionBlockAt(succ);
+ DCHECK(successor->PredecessorCount() == 1);
+ int gap_index = successor->first_instruction_index();
+ range->RecordSpillLocation(allocation_zone(), gap_index, output);
+ range->SetSpillStartIndex(gap_index);
+ }
+ }
+ }
+}
+
+
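+// Descriptive comment: meets the constraints that take effect after the
+// instruction at |instr_index|: fixed temporaries and constant or fixed
+// outputs.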
+void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
+ Instruction* first = code()->InstructionAt(instr_index);
+ // Handle fixed temporaries.
+ for (size_t i = 0; i < first->TempCount(); i++) {
+ UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
+ if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false);
+ }
+ // Handle constant/fixed output operands.
+ for (size_t i = 0; i < first->OutputCount(); i++) {
+ InstructionOperand* output = first->OutputAt(i);
+ if (output->IsConstant()) {
+ int output_vreg = ConstantOperand::cast(output)->virtual_register();
+ TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
+ range->SetSpillStartIndex(instr_index + 1);
+ range->SetSpillOperand(output);
+ continue;
+ }
+ UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
+ TopLevelLiveRange* range =
+ data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
+ bool assigned = false;
+ if (first_output->HasFixedPolicy()) {
+ int output_vreg = first_output->virtual_register();
+ UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
+ bool is_tagged = code()->IsReference(output_vreg);
+ if (first_output->HasSecondaryStorage()) {
+ range->MarkHasPreassignedSlot();
+ data()->preassigned_slot_ranges().push_back(
+ std::make_pair(range, first_output->GetSecondaryStorage()));
+ }
+ AllocateFixed(first_output, instr_index, is_tagged);
+
+ // This value is produced on the stack; we never need to spill it.
+ if (first_output->IsStackSlot()) {
+ DCHECK(LocationOperand::cast(first_output)->index() <
+ data()->frame()->GetTotalFrameSlotCount());
+ range->SetSpillOperand(LocationOperand::cast(first_output));
+ range->SetSpillStartIndex(instr_index + 1);
+ assigned = true;
+ }
+ data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
+ output_copy);
+ }
+ // Make sure we add a gap move for spilling (if we have not done
+ // so already).
+ if (!assigned) {
+ range->RecordSpillLocation(allocation_zone(), instr_index + 1,
+ first_output);
+ range->SetSpillStartIndex(instr_index + 1);
+ }
+ }
+}
+
+
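+// Descriptive comment: meets the constraints that take effect before the
+// instruction at |instr_index|: fixed inputs and "output same as input"
+// operands.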
+void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
+ Instruction* second = code()->InstructionAt(instr_index);
+ // Handle fixed input operands of second instruction.
+ for (size_t i = 0; i < second->InputCount(); i++) {
+ InstructionOperand* input = second->InputAt(i);
+ if (input->IsImmediate() || input->IsExplicit()) {
+ continue; // Ignore immediates and explicitly reserved registers.
+ }
+ UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
+ if (cur_input->HasFixedPolicy()) {
+ int input_vreg = cur_input->virtual_register();
+ UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
+ bool is_tagged = code()->IsReference(input_vreg);
+ AllocateFixed(cur_input, instr_index, is_tagged);
+ data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
+ }
+ }
+ // Handle "output same as input" for second instruction.
+ for (size_t i = 0; i < second->OutputCount(); i++) {
+ InstructionOperand* output = second->OutputAt(i);
+ if (!output->IsUnallocated()) continue;
+ UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
+ if (!second_output->HasSameAsInputPolicy()) continue;
+ DCHECK(i == 0); // Only valid for first output.
+ UnallocatedOperand* cur_input =
+ UnallocatedOperand::cast(second->InputAt(0));
+ int output_vreg = second_output->virtual_register();
+ int input_vreg = cur_input->virtual_register();
+ UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
+ cur_input->set_virtual_register(second_output->virtual_register());
+ MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
+ input_copy, *cur_input);
+ if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
+ if (second->HasReferenceMap()) {
+ RegisterAllocationData::DelayedReference delayed_reference = {
+ second->reference_map(), &gap_move->source()};
+ data()->delayed_references().push_back(delayed_reference);
+ }
+ } else if (!code()->IsReference(input_vreg) &&
+ code()->IsReference(output_vreg)) {
+ // The input is assumed to immediately have a tagged representation,
+ // before the pointer map can be used. I.e. the pointer map at the
+ // instruction will include the output operand (whose value at the
+ // beginning of the instruction is equal to the input operand). If
+ // this is not desired, then the pointer map at this instruction needs
+ // to be adjusted manually.
+ }
+ }
+}
+
+
+void ConstraintBuilder::ResolvePhis() {
+ // Process the blocks in reverse order.
+ for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
+ ResolvePhis(block);
+ }
+}
+
+
+void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
+ for (PhiInstruction* phi : block->phis()) {
+ int phi_vreg = phi->virtual_register();
+ RegisterAllocationData::PhiMapValue* map_value =
+ data()->InitializePhiMap(block, phi);
+ InstructionOperand& output = phi->output();
+ // Map the destination operands, so the commitment phase can find them.
+ for (size_t i = 0; i < phi->operands().size(); ++i) {
+ InstructionBlock* cur_block =
+ code()->InstructionBlockAt(block->predecessors()[i]);
+ UnallocatedOperand input(UnallocatedOperand::ANY, phi->operands()[i]);
+ MoveOperands* move = data()->AddGapMove(
+ cur_block->last_instruction_index(), Instruction::END, input, output);
+ map_value->AddOperand(&move->destination());
+ DCHECK(!code()
+ ->InstructionAt(cur_block->last_instruction_index())
+ ->HasReferenceMap());
+ }
+ TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
+ int gap_index = block->first_instruction_index();
+ live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
+ live_range->SetSpillStartIndex(gap_index);
+ // We use the phi-ness of some nodes in some later heuristics.
+ live_range->set_is_phi(true);
+ live_range->set_is_non_loop_phi(!block->IsLoopHeader());
+ }
+}
+
+
+LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data,
+ Zone* local_zone)
+ : data_(data), phi_hints_(local_zone) {}
+
+
+BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
+ RegisterAllocationData* data) {
+ size_t block_index = block->rpo_number().ToSize();
+ BitVector* live_out = data->live_out_sets()[block_index];
+ if (live_out == nullptr) {
+ // Compute live out for the given block, excluding backward successor
+ // edges.
+ Zone* zone = data->allocation_zone();
+ const InstructionSequence* code = data->code();
+
+ live_out = new (zone) BitVector(code->VirtualRegisterCount(), zone);
+
+ // Process all successor blocks.
+ for (const RpoNumber& succ : block->successors()) {
+ // Add values live on entry to the successor.
+ if (succ <= block->rpo_number()) continue;
+ BitVector* live_in = data->live_in_sets()[succ.ToSize()];
+ if (live_in != nullptr) live_out->Union(*live_in);
+
+ // All phi input operands corresponding to this successor edge are live
+ // out from this block.
+ const InstructionBlock* successor = code->InstructionBlockAt(succ);
+ size_t index = successor->PredecessorIndexOf(block->rpo_number());
+ DCHECK(index < successor->PredecessorCount());
+ for (PhiInstruction* phi : successor->phis()) {
+ live_out->Add(phi->operands()[index]);
+ }
+ }
+ data->live_out_sets()[block_index] = live_out;
+ }
+ return live_out;
+}
+
+
+void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
+ BitVector* live_out) {
+ // Add an interval that includes the entire block to the live range for
+ // each live_out value.
+ LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
+ block->first_instruction_index());
+ LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
+ block->last_instruction_index())
+ .NextStart();
+ BitVector::Iterator iterator(live_out);
+ while (!iterator.Done()) {
+ int operand_index = iterator.Current();
+ TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
+ range->AddUseInterval(start, end, allocation_zone());
+ iterator.Advance();
+ }
+}
+
+
+int LiveRangeBuilder::FixedDoubleLiveRangeID(int index) {
+ return -index - 1 - config()->num_general_registers();
+}
+
+
+TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
+ DCHECK(index < config()->num_general_registers());
+ TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
+ if (result == nullptr) {
+ result = data()->NewLiveRange(FixedLiveRangeID(index),
+ InstructionSequence::DefaultRepresentation());
+ DCHECK(result->IsFixed());
+ result->set_assigned_register(index);
+ data()->MarkAllocated(GENERAL_REGISTERS, index);
+ data()->fixed_live_ranges()[index] = result;
+ }
+ return result;
+}
+
+
+TopLevelLiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
+ DCHECK(index < config()->num_double_registers());
+ TopLevelLiveRange* result = data()->fixed_double_live_ranges()[index];
+ if (result == nullptr) {
+ result = data()->NewLiveRange(FixedDoubleLiveRangeID(index),
+ MachineRepresentation::kFloat64);
+ DCHECK(result->IsFixed());
+ result->set_assigned_register(index);
+ data()->MarkAllocated(DOUBLE_REGISTERS, index);
+ data()->fixed_double_live_ranges()[index] = result;
+ }
+ return result;
+}
+
+
+TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
+ if (operand->IsUnallocated()) {
+ return data()->GetOrCreateLiveRangeFor(
+ UnallocatedOperand::cast(operand)->virtual_register());
+ } else if (operand->IsConstant()) {
+ return data()->GetOrCreateLiveRangeFor(
+ ConstantOperand::cast(operand)->virtual_register());
+ } else if (operand->IsRegister()) {
+ return FixedLiveRangeFor(
+ LocationOperand::cast(operand)->GetRegister().code());
+ } else if (operand->IsDoubleRegister()) {
+ return FixedDoubleLiveRangeFor(
+ LocationOperand::cast(operand)->GetDoubleRegister().code());
+ } else {
+ return nullptr;
+ }
+}
+
+
+UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
+ InstructionOperand* operand,
+ void* hint,
+ UsePositionHintType hint_type) {
+ return new (allocation_zone()) UsePosition(pos, operand, hint, hint_type);
+}
+
+
+UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
+ InstructionOperand* operand, void* hint,
+ UsePositionHintType hint_type) {
+ TopLevelLiveRange* range = LiveRangeFor(operand);
+ if (range == nullptr) return nullptr;
+
+ if (range->IsEmpty() || range->Start() > position) {
+ // This can happen if there is a definition without a use.
+ range->AddUseInterval(position, position.NextStart(), allocation_zone());
+ range->AddUsePosition(NewUsePosition(position.NextStart()));
+ } else {
+ range->ShortenTo(position);
+ }
+ if (!operand->IsUnallocated()) return nullptr;
+ UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+ UsePosition* use_pos =
+ NewUsePosition(position, unalloc_operand, hint, hint_type);
+ range->AddUsePosition(use_pos);
+ return use_pos;
+}
+
+
+UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
+ LifetimePosition position,
+ InstructionOperand* operand, void* hint,
+ UsePositionHintType hint_type) {
+ TopLevelLiveRange* range = LiveRangeFor(operand);
+ if (range == nullptr) return nullptr;
+ UsePosition* use_pos = nullptr;
+ if (operand->IsUnallocated()) {
+ UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+ use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
+ range->AddUsePosition(use_pos);
+ }
+ range->AddUseInterval(block_start, position, allocation_zone());
+ return use_pos;
+}
+
+
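+// Descriptive comment: walks the instructions of |block| in reverse order,
+// updating |live| and the use intervals and use positions of the affected
+// live ranges as definitions and uses are encountered.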
+void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
+ BitVector* live) {
+ int block_start = block->first_instruction_index();
+ LifetimePosition block_start_position =
+ LifetimePosition::GapFromInstructionIndex(block_start);
+
+ for (int index = block->last_instruction_index(); index >= block_start;
+ index--) {
+ LifetimePosition curr_position =
+ LifetimePosition::InstructionFromInstructionIndex(index);
+ Instruction* instr = code()->InstructionAt(index);
+ DCHECK(instr != nullptr);
+ DCHECK(curr_position.IsInstructionPosition());
+ // Process output, inputs, and temps of this instruction.
+ for (size_t i = 0; i < instr->OutputCount(); i++) {
+ InstructionOperand* output = instr->OutputAt(i);
+ if (output->IsUnallocated()) {
+ // Unsupported.
+ DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
+ int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
+ live->Remove(out_vreg);
+ } else if (output->IsConstant()) {
+ int out_vreg = ConstantOperand::cast(output)->virtual_register();
+ live->Remove(out_vreg);
+ }
+ if (block->IsHandler() && index == block_start && output->IsAllocated() &&
+ output->IsRegister() &&
+ AllocatedOperand::cast(output)->GetRegister().is(
+ v8::internal::kReturnRegister0)) {
+ // The register defined here is blocked from the gap start onwards; it
+ // holds the exception value.
+ // TODO(mtrofin): should we explore an explicit opcode for
+ // the first instruction in the handler?
+ Define(LifetimePosition::GapFromInstructionIndex(index), output);
+ } else {
+ Define(curr_position, output);
+ }
+ }
+
+ if (instr->ClobbersRegisters()) {
+ for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
+ int code = config()->GetAllocatableGeneralCode(i);
+ if (!IsOutputRegisterOf(instr, Register::from_code(code))) {
+ TopLevelLiveRange* range = FixedLiveRangeFor(code);
+ range->AddUseInterval(curr_position, curr_position.End(),
+ allocation_zone());
+ }
+ }
+ }
+
+ if (instr->ClobbersDoubleRegisters()) {
+ for (int i = 0; i < config()->num_allocatable_aliased_double_registers();
+ ++i) {
+ int code = config()->GetAllocatableDoubleCode(i);
+ if (!IsOutputDoubleRegisterOf(instr, DoubleRegister::from_code(code))) {
+ TopLevelLiveRange* range = FixedDoubleLiveRangeFor(code);
+ range->AddUseInterval(curr_position, curr_position.End(),
+ allocation_zone());
+ }
+ }
+ }
+
+ for (size_t i = 0; i < instr->InputCount(); i++) {
+ InstructionOperand* input = instr->InputAt(i);
+ if (input->IsImmediate() || input->IsExplicit()) {
+ continue; // Ignore immediates and explicitly reserved registers.
+ }
+ LifetimePosition use_pos;
+ if (input->IsUnallocated() &&
+ UnallocatedOperand::cast(input)->IsUsedAtStart()) {
+ use_pos = curr_position;
+ } else {
+ use_pos = curr_position.End();
+ }
+
+ if (input->IsUnallocated()) {
+ UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
+ int vreg = unalloc->virtual_register();
+ live->Add(vreg);
+ if (unalloc->HasSlotPolicy()) {
+ data()->GetOrCreateLiveRangeFor(vreg)->set_has_slot_use(true);
+ }
+ }
+ Use(block_start_position, use_pos, input);
+ }
+
+ for (size_t i = 0; i < instr->TempCount(); i++) {
+ InstructionOperand* temp = instr->TempAt(i);
+ // Unsupported.
+ DCHECK_IMPLIES(temp->IsUnallocated(),
+ !UnallocatedOperand::cast(temp)->HasSlotPolicy());
+ if (instr->ClobbersTemps()) {
+ if (temp->IsRegister()) continue;
+ if (temp->IsUnallocated()) {
+ UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
+ if (temp_unalloc->HasFixedPolicy()) {
+ continue;
+ }
+ }
+ }
+ Use(block_start_position, curr_position.End(), temp);
+ Define(curr_position, temp);
+ }
+
+ // Process the moves of the instruction's gaps, making their sources live.
+ const Instruction::GapPosition kPositions[] = {Instruction::END,
+ Instruction::START};
+ curr_position = curr_position.PrevStart();
+ DCHECK(curr_position.IsGapPosition());
+ for (const Instruction::GapPosition& position : kPositions) {
+ ParallelMove* move = instr->GetParallelMove(position);
+ if (move == nullptr) continue;
+ if (position == Instruction::END) {
+ curr_position = curr_position.End();
+ } else {
+ curr_position = curr_position.Start();
+ }
+ for (MoveOperands* cur : *move) {
+ InstructionOperand& from = cur->source();
+ InstructionOperand& to = cur->destination();
+ void* hint = &to;
+ UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
+ UsePosition* to_use = nullptr;
+ int phi_vreg = -1;
+ if (to.IsUnallocated()) {
+ int to_vreg = UnallocatedOperand::cast(to).virtual_register();
+ TopLevelLiveRange* to_range =
+ data()->GetOrCreateLiveRangeFor(to_vreg);
+ if (to_range->is_phi()) {
+ phi_vreg = to_vreg;
+ if (to_range->is_non_loop_phi()) {
+ hint = to_range->current_hint_position();
+ hint_type = hint == nullptr ? UsePositionHintType::kNone
+ : UsePositionHintType::kUsePos;
+ } else {
+ hint_type = UsePositionHintType::kPhi;
+ hint = data()->GetPhiMapValueFor(to_vreg);
+ }
+ } else {
+ if (live->Contains(to_vreg)) {
+ to_use = Define(curr_position, &to, &from,
+ UsePosition::HintTypeForOperand(from));
+ live->Remove(to_vreg);
+ } else {
+ cur->Eliminate();
+ continue;
+ }
+ }
+ } else {
+ Define(curr_position, &to);
+ }
+ UsePosition* from_use =
+ Use(block_start_position, curr_position, &from, hint, hint_type);
+ // Mark range live.
+ if (from.IsUnallocated()) {
+ live->Add(UnallocatedOperand::cast(from).virtual_register());
+ }
+ // Resolve use position hints just created.
+ if (to_use != nullptr && from_use != nullptr) {
+ to_use->ResolveHint(from_use);
+ from_use->ResolveHint(to_use);
+ }
+ DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved());
+ DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved());
+ // Potentially resolve phi hint.
+ if (phi_vreg != -1) ResolvePhiHint(&from, from_use);
+ }
+ }
+ }
+}
+
+
+void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
+ BitVector* live) {
+ for (PhiInstruction* phi : block->phis()) {
+ // The live range interval already ends at the first instruction of the
+ // block.
+ int phi_vreg = phi->virtual_register();
+ live->Remove(phi_vreg);
+ InstructionOperand* hint = nullptr;
+ Instruction* instr = GetLastInstruction(
+ code(), code()->InstructionBlockAt(block->predecessors()[0]));
+ for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
+ InstructionOperand& to = move->destination();
+ if (to.IsUnallocated() &&
+ UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
+ hint = &move->source();
+ break;
+ }
+ }
+ DCHECK(hint != nullptr);
+ LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
+ block->first_instruction_index());
+ UsePosition* use_pos = Define(block_start, &phi->output(), hint,
+ UsePosition::HintTypeForOperand(*hint));
+ MapPhiHint(hint, use_pos);
+ }
+}
+
+
+void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
+ BitVector* live) {
+ DCHECK(block->IsLoopHeader());
+ // Add a live range stretching from the first loop instruction to the last
+ // for each value live on entry to the header.
+ BitVector::Iterator iterator(live);
+ LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
+ block->first_instruction_index());
+ LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
+ code()->LastLoopInstructionIndex(block))
+ .NextFullStart();
+ while (!iterator.Done()) {
+ int operand_index = iterator.Current();
+ TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
+ range->EnsureInterval(start, end, allocation_zone());
+ iterator.Advance();
+ }
+ // Insert all values into the live-in sets of all blocks in the loop.
+ for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
+ ++i) {
+ live_in_sets()[i]->Union(*live);
+ }
+}
+
+
+void LiveRangeBuilder::BuildLiveRanges() {
+ // Process the blocks in reverse order.
+ for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
+ --block_id) {
+ InstructionBlock* block =
+ code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
+ BitVector* live = ComputeLiveOut(block, data());
+ // Initially consider all live_out values live for the entire block. We
+ // will shorten these intervals if necessary.
+ AddInitialIntervals(block, live);
+ // Process the instructions in reverse order, generating and killing
+ // live values.
+ ProcessInstructions(block, live);
+ // All phi output operands are killed by this block.
+ ProcessPhis(block, live);
+ // Now live is live_in for this block, excluding values live out on
+ // backward successor edges.
+ if (block->IsLoopHeader()) ProcessLoopHeader(block, live);
+ live_in_sets()[block_id] = live;
+ }
+ // Postprocess the ranges.
+ for (TopLevelLiveRange* range : data()->live_ranges()) {
+ if (range == nullptr) continue;
+ // Give slots to all ranges with a non-fixed slot use.
+ if (range->has_slot_use() && range->HasNoSpillType()) {
+ data()->AssignSpillRangeToLiveRange(range);
+ }
+ // TODO(bmeurer): This is a horrible hack to make sure that for constant
+ // live ranges, every use requires the constant to be in a register.
+ // Without this hack, all uses with "any" policy would get the constant
+ // operand assigned.
+ if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
+ for (UsePosition* pos = range->first_pos(); pos != nullptr;
+ pos = pos->next()) {
+ if (pos->type() == UsePositionType::kRequiresSlot) continue;
+ UsePositionType new_type = UsePositionType::kAny;
+ // Can't mark phis as needing a register.
+ if (!pos->pos().IsGapPosition()) {
+ new_type = UsePositionType::kRequiresRegister;
+ }
+ pos->set_type(new_type, true);
+ }
+ }
+ }
+ for (auto preassigned : data()->preassigned_slot_ranges()) {
+ TopLevelLiveRange* range = preassigned.first;
+ int slot_id = preassigned.second;
+ SpillRange* spill = range->HasSpillRange()
+ ? range->GetSpillRange()
+ : data()->AssignSpillRangeToLiveRange(range);
+ spill->set_assigned_slot(slot_id);
+ }
+#ifdef DEBUG
+ Verify();
+#endif
+}
+
+
+void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
+ UsePosition* use_pos) {
+ DCHECK(!use_pos->IsResolved());
+ auto res = phi_hints_.insert(std::make_pair(operand, use_pos));
+ DCHECK(res.second);
+ USE(res);
+}
+
+
+void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
+ UsePosition* use_pos) {
+ auto it = phi_hints_.find(operand);
+ if (it == phi_hints_.end()) return;
+ DCHECK(!it->second->IsResolved());
+ it->second->ResolveHint(use_pos);
+}
+
+
+void LiveRangeBuilder::Verify() const {
+ for (auto& hint : phi_hints_) {
+ CHECK(hint.second->IsResolved());
+ }
+ for (TopLevelLiveRange* current : data()->live_ranges()) {
+ if (current != nullptr && !current->IsEmpty()) current->Verify();
+ }
+}
+
+
+RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
+ RegisterKind kind)
+ : data_(data),
+ mode_(kind),
+ num_registers_(GetRegisterCount(data->config(), kind)),
+ num_allocatable_registers_(
+ GetAllocatableRegisterCount(data->config(), kind)),
+ allocatable_register_codes_(
+ GetAllocatableRegisterCodes(data->config(), kind)) {}
+
+
+LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
+ const LiveRange* range, int instruction_index) {
+ LifetimePosition ret = LifetimePosition::Invalid();
+
+ ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
+ if (range->Start() >= ret || ret >= range->End()) {
+ return LifetimePosition::Invalid();
+ }
+ return ret;
+}
+
+
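+// Descriptive comment: spills ranges that are defined by a spill operand. A
+// range with no use that benefits from a register is spilled entirely; if
+// its first register-beneficial use comes later, the range is split and only
+// the head is spilled.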
+void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand(
+ bool operands_only) {
+ size_t initial_range_count = data()->live_ranges().size();
+ for (size_t i = 0; i < initial_range_count; ++i) {
+ TopLevelLiveRange* range = data()->live_ranges()[i];
+ if (!CanProcessRange(range)) continue;
+ if (range->HasNoSpillType() || (operands_only && range->HasSpillRange())) {
+ continue;
+ }
+
+ LifetimePosition start = range->Start();
+ TRACE("Live range %d:%d is defined by a spill operand.\n",
+ range->TopLevel()->vreg(), range->relative_id());
+ LifetimePosition next_pos = start;
+ if (next_pos.IsGapPosition()) {
+ next_pos = next_pos.NextStart();
+ }
+ UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+ // If the range already has a spill operand and it doesn't need a
+ // register immediately, split it and spill the first part of the range.
+ if (pos == nullptr) {
+ Spill(range);
+ } else if (pos->pos() > range->Start().NextStart()) {
+ // Do not spill the live range eagerly if the use position that could
+ // benefit from the register is too close to the start of the live range.
+ LifetimePosition split_pos = GetSplitPositionForInstruction(
+ range, pos->pos().ToInstructionIndex());
+ // There is no place to split, so we can't split and spill.
+ if (!split_pos.IsValid()) continue;
+
+ split_pos =
+ FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
+
+ SplitRangeAt(range, split_pos);
+ Spill(range);
+ }
+ }
+}
+
+
+LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
+ LifetimePosition pos) {
+ DCHECK(!range->TopLevel()->IsFixed());
+ TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(),
+ range->relative_id(), pos.value());
+
+ if (pos <= range->Start()) return range;
+
+ // We can't properly connect live ranges if splitting occurred at the end
+ // of a block.
+ DCHECK(pos.IsStart() || pos.IsGapPosition() ||
+ (GetInstructionBlock(code(), pos)->last_instruction_index() !=
+ pos.ToInstructionIndex()));
+
+ LiveRange* result = range->SplitAt(pos, allocation_zone());
+ return result;
+}
+
+
+LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition end) {
+ DCHECK(!range->TopLevel()->IsFixed());
+ TRACE("Splitting live range %d:%d in position between [%d, %d]\n",
+ range->TopLevel()->vreg(), range->relative_id(), start.value(),
+ end.value());
+
+ LifetimePosition split_pos = FindOptimalSplitPos(start, end);
+ DCHECK(split_pos >= start);
+ return SplitRangeAt(range, split_pos);
+}
+
+
+LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
+ LifetimePosition end) {
+ int start_instr = start.ToInstructionIndex();
+ int end_instr = end.ToInstructionIndex();
+ DCHECK(start_instr <= end_instr);
+
+ // We have no choice: start and end fall within the same instruction.
+ if (start_instr == end_instr) return end;
+
+ const InstructionBlock* start_block = GetInstructionBlock(code(), start);
+ const InstructionBlock* end_block = GetInstructionBlock(code(), end);
+
+ if (end_block == start_block) {
+ // The interval is split in the same basic block. Split at the latest
+ // possible position.
+ return end;
+ }
+
+ const InstructionBlock* block = end_block;
+ // Find header of outermost loop.
+ // TODO(titzer): fix redundancy below.
+ while (GetContainingLoop(code(), block) != nullptr &&
+ GetContainingLoop(code(), block)->rpo_number().ToInt() >
+ start_block->rpo_number().ToInt()) {
+ block = GetContainingLoop(code(), block);
+ }
+
+ // We did not find any suitable outer loop. Split at the latest possible
+ // position unless end_block is a loop header itself.
+ if (block == end_block && !end_block->IsLoopHeader()) return end;
+
+ return LifetimePosition::GapFromInstructionIndex(
+ block->first_instruction_index());
+}
+
+
+LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
+ LiveRange* range, LifetimePosition pos) {
+ const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
+ const InstructionBlock* loop_header =
+ block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
+
+ if (loop_header == nullptr) return pos;
+
+ const UsePosition* prev_use =
+ range->PreviousUsePositionRegisterIsBeneficial(pos);
+
+ while (loop_header != nullptr) {
+ // We are going to spill the live range inside the loop.
+ // If possible, try to move the spilling position backwards to the loop
+ // header: this reduces the number of memory moves on the back edge.
+ LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
+ loop_header->first_instruction_index());
+
+ if (range->Covers(loop_start)) {
+ if (prev_use == nullptr || prev_use->pos() < loop_start) {
+ // No register-beneficial use inside the loop before this position.
+ pos = loop_start;
+ }
+ }
+
+ // Try hoisting out to an outer loop.
+ loop_header = GetContainingLoop(code(), loop_header);
+ }
+
+ return pos;
+}
+
+
+void RegisterAllocator::Spill(LiveRange* range) {
+ DCHECK(!range->spilled());
+ TopLevelLiveRange* first = range->TopLevel();
+ TRACE("Spilling live range %d:%d\n", first->vreg(), range->relative_id());
+
+ if (first->HasNoSpillType()) {
+ data()->AssignSpillRangeToLiveRange(first);
+ }
+ range->Spill();
+}
+
+
+const ZoneVector<TopLevelLiveRange*>& RegisterAllocator::GetFixedRegisters()
+ const {
+ return mode() == DOUBLE_REGISTERS ? data()->fixed_double_live_ranges()
+ : data()->fixed_live_ranges();
+}
+
+
+const char* RegisterAllocator::RegisterName(int register_code) const {
+ if (mode() == GENERAL_REGISTERS) {
+ return data()->config()->GetGeneralRegisterName(register_code);
+ } else {
+ return data()->config()->GetDoubleRegisterName(register_code);
+ }
+}
+
+
+LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
+ RegisterKind kind, Zone* local_zone)
+ : RegisterAllocator(data, kind),
+ unhandled_live_ranges_(local_zone),
+ active_live_ranges_(local_zone),
+ inactive_live_ranges_(local_zone) {
+ unhandled_live_ranges().reserve(
+ static_cast<size_t>(code()->VirtualRegisterCount() * 2));
+ active_live_ranges().reserve(8);
+ inactive_live_ranges().reserve(8);
+ // TryAllocateFreeReg and AllocateBlockedReg assume this
+ // when allocating local arrays.
+ DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
+ this->data()->config()->num_general_registers());
+}
+
+
+void LinearScanAllocator::AllocateRegisters() {
+ DCHECK(unhandled_live_ranges().empty());
+ DCHECK(active_live_ranges().empty());
+ DCHECK(inactive_live_ranges().empty());
+
+ SplitAndSpillRangesDefinedByMemoryOperand(code()->VirtualRegisterCount() <=
+ num_allocatable_registers());
+
+ for (TopLevelLiveRange* range : data()->live_ranges()) {
+ if (!CanProcessRange(range)) continue;
+ for (LiveRange* to_add = range; to_add != nullptr;
+ to_add = to_add->next()) {
+ if (!to_add->spilled()) {
+ AddToUnhandledUnsorted(to_add);
+ }
+ }
+ }
+ SortUnhandled();
+ DCHECK(UnhandledIsSorted());
+
+ auto& fixed_ranges = GetFixedRegisters();
+ for (TopLevelLiveRange* current : fixed_ranges) {
+ if (current != nullptr) {
+ DCHECK_EQ(mode(), current->kind());
+ AddToInactive(current);
+ }
+ }
+
+ while (!unhandled_live_ranges().empty()) {
+ DCHECK(UnhandledIsSorted());
+ LiveRange* current = unhandled_live_ranges().back();
+ unhandled_live_ranges().pop_back();
+ DCHECK(UnhandledIsSorted());
+ LifetimePosition position = current->Start();
+#ifdef DEBUG
+ allocation_finger_ = position;
+#endif
+ TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
+ current->relative_id(), position.value());
+
+ if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
+ continue;
+
+ for (size_t i = 0; i < active_live_ranges().size(); ++i) {
+ LiveRange* cur_active = active_live_ranges()[i];
+ if (cur_active->End() <= position) {
+ ActiveToHandled(cur_active);
+ --i; // The live range was removed from the list of active live ranges.
+ } else if (!cur_active->Covers(position)) {
+ ActiveToInactive(cur_active);
+ --i; // The live range was removed from the list of active live ranges.
+ }
+ }
+
+ for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
+ LiveRange* cur_inactive = inactive_live_ranges()[i];
+ if (cur_inactive->End() <= position) {
+ InactiveToHandled(cur_inactive);
+ --i; // Live range was removed from the list of inactive live ranges.
+ } else if (cur_inactive->Covers(position)) {
+ InactiveToActive(cur_inactive);
+ --i; // Live range was removed from the list of inactive live ranges.
+ }
+ }
+
+ DCHECK(!current->HasRegisterAssigned() && !current->spilled());
+
+ bool result = TryAllocateFreeReg(current);
+ if (!result) AllocateBlockedReg(current);
+ if (current->HasRegisterAssigned()) {
+ AddToActive(current);
+ }
+ }
+}
+
+
+void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
+ int reg) {
+ data()->MarkAllocated(range->kind(), reg);
+ range->set_assigned_register(reg);
+ range->SetUseHints(reg);
+ if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
+ data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
+ }
+}
+
+
+void LinearScanAllocator::AddToActive(LiveRange* range) {
+ TRACE("Add live range %d:%d to active\n", range->TopLevel()->vreg(),
+ range->relative_id());
+ active_live_ranges().push_back(range);
+}
+
+
+void LinearScanAllocator::AddToInactive(LiveRange* range) {
+ TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
+ range->relative_id());
+ inactive_live_ranges().push_back(range);
+}
+
+
+void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) {
+ if (range == nullptr || range->IsEmpty()) return;
+ DCHECK(!range->HasRegisterAssigned() && !range->spilled());
+ DCHECK(allocation_finger_ <= range->Start());
+ for (int i = static_cast<int>(unhandled_live_ranges().size() - 1); i >= 0;
+ --i) {
+ LiveRange* cur_range = unhandled_live_ranges().at(i);
+ if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
+ TRACE("Add live range %d:%d to unhandled at %d\n",
+ range->TopLevel()->vreg(), range->relative_id(), i + 1);
+ auto it = unhandled_live_ranges().begin() + (i + 1);
+ unhandled_live_ranges().insert(it, range);
+ DCHECK(UnhandledIsSorted());
+ return;
+ }
+ TRACE("Add live range %d:%d to unhandled at start\n",
+ range->TopLevel()->vreg(), range->relative_id());
+ unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
+ DCHECK(UnhandledIsSorted());
+}
+
+
+void LinearScanAllocator::AddToUnhandledUnsorted(LiveRange* range) {
+ if (range == nullptr || range->IsEmpty()) return;
+ DCHECK(!range->HasRegisterAssigned() && !range->spilled());
+ TRACE("Add live range %d:%d to unhandled unsorted at end\n",
+ range->TopLevel()->vreg(), range->relative_id());
+ unhandled_live_ranges().push_back(range);
+}
+
+
+static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
+ DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a));
+ if (a->ShouldBeAllocatedBefore(b)) return false;
+ if (b->ShouldBeAllocatedBefore(a)) return true;
+ return a->TopLevel()->vreg() < b->TopLevel()->vreg();
+}
+
+
+// Sort the unhandled live ranges so that the ranges to be processed first
+// are at the end of the vector. This is convenient for the register
+// allocation algorithm because it is efficient to remove elements from the
+// end.
+void LinearScanAllocator::SortUnhandled() {
+ TRACE("Sort unhandled\n");
+ std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
+ &UnhandledSortHelper);
+}
+
+
+bool LinearScanAllocator::UnhandledIsSorted() {
+ size_t len = unhandled_live_ranges().size();
+ for (size_t i = 1; i < len; i++) {
+ LiveRange* a = unhandled_live_ranges().at(i - 1);
+ LiveRange* b = unhandled_live_ranges().at(i);
+ if (a->Start() < b->Start()) return false;
+ }
+ return true;
+}
+
+
+void LinearScanAllocator::ActiveToHandled(LiveRange* range) {
+ RemoveElement(&active_live_ranges(), range);
+ TRACE("Moving live range %d:%d from active to handled\n",
+ range->TopLevel()->vreg(), range->relative_id());
+}
+
+
+void LinearScanAllocator::ActiveToInactive(LiveRange* range) {
+ RemoveElement(&active_live_ranges(), range);
+ inactive_live_ranges().push_back(range);
+ TRACE("Moving live range %d:%d from active to inactive\n",
+ range->TopLevel()->vreg(), range->relative_id());
+}
+
+
+void LinearScanAllocator::InactiveToHandled(LiveRange* range) {
+ RemoveElement(&inactive_live_ranges(), range);
+ TRACE("Moving live range %d:%d from inactive to handled\n",
+ range->TopLevel()->vreg(), range->relative_id());
+}
+
+
+void LinearScanAllocator::InactiveToActive(LiveRange* range) {
+ RemoveElement(&inactive_live_ranges(), range);
+ active_live_ranges().push_back(range);
+ TRACE("Moving live range %d:%d from inactive to active\n",
+ range->TopLevel()->vreg(), range->relative_id());
+}
+
+
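+// Descriptive comment: tries to assign |current| a register that is free
+// for its entire lifetime, preferring the hinted register. If the best
+// candidate is only free for a prefix of the range, the range is split and
+// the register is assigned to the head. Returns false if every register is
+// blocked at the range start.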
+bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
+ LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters];
+
+ for (int i = 0; i < num_registers(); i++) {
+ free_until_pos[i] = LifetimePosition::MaxPosition();
+ }
+
+ for (LiveRange* cur_active : active_live_ranges()) {
+ free_until_pos[cur_active->assigned_register()] =
+ LifetimePosition::GapFromInstructionIndex(0);
+ TRACE("Register %s is free until pos %d (1)\n",
+ RegisterName(cur_active->assigned_register()),
+ LifetimePosition::GapFromInstructionIndex(0).value());
+ }
+
+ for (LiveRange* cur_inactive : inactive_live_ranges()) {
+ DCHECK(cur_inactive->End() > current->Start());
+ LifetimePosition next_intersection =
+ cur_inactive->FirstIntersection(current);
+ if (!next_intersection.IsValid()) continue;
+ int cur_reg = cur_inactive->assigned_register();
+ free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+ TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
+ Min(free_until_pos[cur_reg], next_intersection).value());
+ }
+
+ int hint_register;
+ if (current->FirstHintPosition(&hint_register) != nullptr) {
+ TRACE(
+ "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
+ RegisterName(hint_register), free_until_pos[hint_register].value(),
+ current->TopLevel()->vreg(), current->relative_id(),
+ current->End().value());
+
+ // The desired register is free until the end of the current live range.
+ if (free_until_pos[hint_register] >= current->End()) {
+ TRACE("Assigning preferred reg %s to live range %d:%d\n",
+ RegisterName(hint_register), current->TopLevel()->vreg(),
+ current->relative_id());
+ SetLiveRangeAssignedRegister(current, hint_register);
+ return true;
+ }
+ }
+
+ // Find the register which stays free for the longest time.
+ int reg = allocatable_register_code(0);
+ for (int i = 1; i < num_allocatable_registers(); ++i) {
+ int code = allocatable_register_code(i);
+ if (free_until_pos[code] > free_until_pos[reg]) {
+ reg = code;
+ }
+ }
+
+ LifetimePosition pos = free_until_pos[reg];
+
+ if (pos <= current->Start()) {
+ // All registers are blocked.
+ return false;
+ }
+
+ if (pos < current->End()) {
+ // Register reg is available at the range start but becomes blocked before
+ // the range end. Split current at the position where it becomes blocked.
+ LiveRange* tail = SplitRangeAt(current, pos);
+ AddToUnhandledSorted(tail);
+ }
+
+ // Register reg is available at the range start and is free until
+ // the range end.
+ DCHECK(pos >= current->End());
+ TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
+ current->TopLevel()->vreg(), current->relative_id());
+ SetLiveRangeAssignedRegister(current, reg);
+
+ return true;
+}
+
+
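+// Descriptive comment: called when no register is free for the whole of
+// |current|. Either spills |current| up to its first use that requires a
+// register, or evicts conflicting ranges from the register whose next use
+// is furthest away and assigns that register to |current|.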
+void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
+ UsePosition* register_use = current->NextRegisterPosition(current->Start());
+ if (register_use == nullptr) {
+ // There is no use in the current live range that requires a register.
+ // We can just spill it.
+ Spill(current);
+ return;
+ }
+
+ LifetimePosition use_pos[RegisterConfiguration::kMaxDoubleRegisters];
+ LifetimePosition block_pos[RegisterConfiguration::kMaxDoubleRegisters];
+
+ for (int i = 0; i < num_registers(); i++) {
+ use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
+ }
+
+ for (LiveRange* range : active_live_ranges()) {
+ int cur_reg = range->assigned_register();
+ if (range->TopLevel()->IsFixed() ||
+ !range->CanBeSpilled(current->Start())) {
+ block_pos[cur_reg] = use_pos[cur_reg] =
+ LifetimePosition::GapFromInstructionIndex(0);
+ } else {
+ UsePosition* next_use =
+ range->NextUsePositionRegisterIsBeneficial(current->Start());
+ if (next_use == nullptr) {
+ use_pos[cur_reg] = range->End();
+ } else {
+ use_pos[cur_reg] = next_use->pos();
+ }
+ }
+ }
+
+ for (LiveRange* range : inactive_live_ranges()) {
+ DCHECK(range->End() > current->Start());
+ LifetimePosition next_intersection = range->FirstIntersection(current);
+ if (!next_intersection.IsValid()) continue;
+ int cur_reg = range->assigned_register();
+ if (range->TopLevel()->IsFixed()) {
+ block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+ use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+ } else {
+ use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+ }
+ }
+
+ int reg = allocatable_register_code(0);
+ for (int i = 1; i < num_allocatable_registers(); ++i) {
+ int code = allocatable_register_code(i);
+ if (use_pos[code] > use_pos[reg]) {
+ reg = code;
+ }
+ }
+
+ LifetimePosition pos = use_pos[reg];
+
+ if (pos < register_use->pos()) {
+ // All registers are blocked before the first use that requires a register.
+ // Spill starting part of live range up to that use.
+ SpillBetween(current, current->Start(), register_use->pos());
+ return;
+ }
+
+ if (block_pos[reg] < current->End()) {
+ // Register becomes blocked before the current range end. Split before that
+ // position.
+ LiveRange* tail =
+ SplitBetween(current, current->Start(), block_pos[reg].Start());
+ AddToUnhandledSorted(tail);
+ }
+
+ // Register reg is not blocked for the whole range.
+ DCHECK(block_pos[reg] >= current->End());
+ TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg),
+ current->TopLevel()->vreg(), current->relative_id());
+ SetLiveRangeAssignedRegister(current, reg);
+
+ // This register was not free. Thus we need to find and spill
+ // parts of active and inactive live ranges that use the same register
+ // at the same lifetime positions as current.
+ SplitAndSpillIntersecting(current);
+}
+
+
+void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
+ DCHECK(current->HasRegisterAssigned());
+ int reg = current->assigned_register();
+ LifetimePosition split_pos = current->Start();
+ for (size_t i = 0; i < active_live_ranges().size(); ++i) {
+ LiveRange* range = active_live_ranges()[i];
+ if (range->assigned_register() == reg) {
+ UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
+ if (next_pos == nullptr) {
+ SpillAfter(range, spill_pos);
+ } else {
+ // When spilling between spill_pos and next_pos, ensure that the range
+ // remains spilled at least until the start of the current live range.
+ // This guarantees that we will not introduce new unhandled ranges that
+ // start before the current range, as that would violate the allocation
+ // invariant and lead to an inconsistent state of active and inactive
+ // live ranges: ranges are allocated in order of their start positions,
+ // and ranges are retired from active/inactive when the start of the
+ // current live range is larger than their end.
+ SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
+ }
+ ActiveToHandled(range);
+ --i;
+ }
+ }
+
+ for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
+ LiveRange* range = inactive_live_ranges()[i];
+ DCHECK(range->End() > current->Start());
+ if (range->assigned_register() == reg && !range->TopLevel()->IsFixed()) {
+ LifetimePosition next_intersection = range->FirstIntersection(current);
+ if (next_intersection.IsValid()) {
+ UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ if (next_pos == nullptr) {
+ SpillAfter(range, split_pos);
+ } else {
+ next_intersection = Min(next_intersection, next_pos->pos());
+ SpillBetween(range, split_pos, next_intersection);
+ }
+ InactiveToHandled(range);
+ --i;
+ }
+ }
+ }
+}
+
+
+bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
+ if (!range->is_phi()) return false;
+
+ DCHECK(!range->HasSpillOperand());
+ RegisterAllocationData::PhiMapValue* phi_map_value =
+ data()->GetPhiMapValueFor(range);
+ const PhiInstruction* phi = phi_map_value->phi();
+ const InstructionBlock* block = phi_map_value->block();
// Count the number of spilled operands.
size_t spilled_count = 0;
LiveRange* first_op = nullptr;
for (size_t i = 0; i < phi->operands().size(); i++) {
int op = phi->operands()[i];
- LiveRange* op_range = LiveRangeFor(op);
- if (op_range->GetSpillRange() == nullptr) continue;
- auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
- auto pred_end =
- LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+ LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
+ if (!op_range->TopLevel()->HasSpillRange()) continue;
+ const InstructionBlock* pred =
+ code()->InstructionBlockAt(block->predecessors()[i]);
+ LifetimePosition pred_end =
+ LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
while (op_range != nullptr && !op_range->CanCover(pred_end)) {
op_range = op_range->next();
}
- if (op_range != nullptr && op_range->IsSpilled()) {
+ if (op_range != nullptr && op_range->spilled()) {
spilled_count++;
if (first_op == nullptr) {
first_op = op_range->TopLevel();
@@ -988,14 +2855,14 @@
// Try to merge the spilled operands and count the number of merged spilled
// operands.
DCHECK(first_op != nullptr);
- auto first_op_spill = first_op->GetSpillRange();
+ SpillRange* first_op_spill = first_op->TopLevel()->GetSpillRange();
size_t num_merged = 1;
for (size_t i = 1; i < phi->operands().size(); i++) {
int op = phi->operands()[i];
- auto op_range = LiveRangeFor(op);
- auto op_spill = op_range->GetSpillRange();
- if (op_spill != nullptr &&
- (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill))) {
+ TopLevelLiveRange* op_range = data()->live_ranges()[op];
+ if (!op_range->HasSpillRange()) continue;
+ SpillRange* op_spill = op_range->GetSpillRange();
+ if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
num_merged++;
}
}
@@ -1010,21 +2877,26 @@
// If the range does not need register soon, spill it to the merged
// spill range.
- auto next_pos = range->Start();
- if (code()->IsGapAt(next_pos.InstructionIndex())) {
- next_pos = next_pos.NextInstruction();
- }
- auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+ LifetimePosition next_pos = range->Start();
+ if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
+ UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
if (pos == nullptr) {
- auto spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
- CHECK(first_op_spill->TryMerge(spill_range));
+ SpillRange* spill_range =
+ range->TopLevel()->HasSpillRange()
+ ? range->TopLevel()->GetSpillRange()
+ : data()->AssignSpillRangeToLiveRange(range->TopLevel());
+ bool merged = first_op_spill->TryMerge(spill_range);
+ CHECK(merged);
Spill(range);
return true;
- } else if (pos->pos().Value() > range->Start().NextInstruction().Value()) {
- auto spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
- CHECK(first_op_spill->TryMerge(spill_range));
+ } else if (pos->pos() > range->Start().NextStart()) {
+ SpillRange* spill_range =
+ range->TopLevel()->HasSpillRange()
+ ? range->TopLevel()->GetSpillRange()
+ : data()->AssignSpillRangeToLiveRange(range->TopLevel());
+ bool merged = first_op_spill->TryMerge(spill_range);
+ CHECK(merged);
SpillBetween(range, range->Start(), pos->pos());
- if (!AllocationOk()) return false;
DCHECK(UnhandledIsSorted());
return true;
}
@@ -1032,446 +2904,307 @@
}
-void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
- int start = block->first_instruction_index();
- int end = block->last_instruction_index();
- DCHECK_NE(-1, start);
- for (int i = start; i <= end; ++i) {
- if (code()->IsGapAt(i)) {
- Instruction* instr = nullptr;
- Instruction* prev_instr = nullptr;
- if (i < end) instr = InstructionAt(i + 1);
- if (i > start) prev_instr = InstructionAt(i - 1);
- MeetConstraintsBetween(prev_instr, instr, i);
- if (!AllocationOk()) return;
- }
- }
+void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
+ LiveRange* second_part = SplitRangeAt(range, pos);
+ Spill(second_part);
+}
- // Meet register constraints for the instruction in the end.
- if (!code()->IsGapAt(end)) {
- MeetRegisterConstraintsForLastInstructionInBlock(block);
+
+void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
+ LifetimePosition end) {
+ SpillBetweenUntil(range, start, start, end);
+}
+
+
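+// Descriptive comment: splits |range| at |start| and spills the part that
+// lies between |start| and a position no earlier than |until| and no later
+// than |end|; the remainder is added back to the unhandled list. If the
+// part after |start| does not reach |end|, nothing is spilled.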
+void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition until,
+ LifetimePosition end) {
+ CHECK(start < end);
+ LiveRange* second_part = SplitRangeAt(range, start);
+
+ if (second_part->Start() < end) {
+ // The split result intersects with [start, end[.
+ // Split it at position between ]start+1, end[, spill the middle part
+ // and put the rest to unhandled.
+ LifetimePosition third_part_end = end.PrevStart().End();
+ if (data()->IsBlockBoundary(end.Start())) {
+ third_part_end = end.Start();
+ }
+ LiveRange* third_part = SplitBetween(
+ second_part, Max(second_part->Start().End(), until), third_part_end);
+
+ DCHECK(third_part != second_part);
+
+ Spill(second_part);
+ AddToUnhandledSorted(third_part);
+ } else {
+ // The split result does not intersect with [start, end[.
+ // Nothing to spill. Just put it to unhandled as whole.
+ AddToUnhandledSorted(second_part);
}
}
-void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
- const InstructionBlock* block) {
- int end = block->last_instruction_index();
- auto last_instruction = InstructionAt(end);
- for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
- auto output_operand = last_instruction->OutputAt(i);
- DCHECK(!output_operand->IsConstant());
- auto output = UnallocatedOperand::cast(output_operand);
- int output_vreg = output->virtual_register();
- auto range = LiveRangeFor(output_vreg);
- bool assigned = false;
- if (output->HasFixedPolicy()) {
- AllocateFixed(output, -1, false);
- // This value is produced on the stack, we never need to spill it.
- if (output->IsStackSlot()) {
- DCHECK(output->index() < 0);
- range->SetSpillOperand(output);
- range->SetSpillStartIndex(end);
- assigned = true;
- }
-
- for (auto succ : block->successors()) {
- const InstructionBlock* successor = code()->InstructionBlockAt(succ);
- DCHECK(successor->PredecessorCount() == 1);
- int gap_index = successor->first_instruction_index() + 1;
- DCHECK(code()->IsGapAt(gap_index));
-
- // Create an unconstrained operand for the same virtual register
- // and insert a gap move from the fixed output to the operand.
- UnallocatedOperand* output_copy =
- new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
- output_copy->set_virtual_register(output_vreg);
-
- AddGapMove(gap_index, GapInstruction::START, output, output_copy);
- }
- }
-
- if (!assigned) {
- for (auto succ : block->successors()) {
- const InstructionBlock* successor = code()->InstructionBlockAt(succ);
- DCHECK(successor->PredecessorCount() == 1);
- int gap_index = successor->first_instruction_index() + 1;
- range->SpillAtDefinition(local_zone(), gap_index, output);
- range->SetSpillStartIndex(gap_index);
- }
- }
- }
-}
+SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
+ : data_(data) {}
-void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
- Instruction* second,
- int gap_index) {
- if (first != nullptr) {
- // Handle fixed temporaries.
- for (size_t i = 0; i < first->TempCount(); i++) {
- auto temp = UnallocatedOperand::cast(first->TempAt(i));
- if (temp->HasFixedPolicy()) {
- AllocateFixed(temp, gap_index - 1, false);
- }
- }
-
- // Handle constant/fixed output operands.
- for (size_t i = 0; i < first->OutputCount(); i++) {
- InstructionOperand* output = first->OutputAt(i);
- if (output->IsConstant()) {
- int output_vreg = output->index();
- auto range = LiveRangeFor(output_vreg);
- range->SetSpillStartIndex(gap_index - 1);
- range->SetSpillOperand(output);
- } else {
- auto first_output = UnallocatedOperand::cast(output);
- auto range = LiveRangeFor(first_output->virtual_register());
- bool assigned = false;
- if (first_output->HasFixedPolicy()) {
- auto output_copy = first_output->CopyUnconstrained(code_zone());
- bool is_tagged = HasTaggedValue(first_output->virtual_register());
- AllocateFixed(first_output, gap_index, is_tagged);
-
- // This value is produced on the stack, we never need to spill it.
- if (first_output->IsStackSlot()) {
- DCHECK(first_output->index() < 0);
- range->SetSpillOperand(first_output);
- range->SetSpillStartIndex(gap_index - 1);
- assigned = true;
- }
- AddGapMove(gap_index, GapInstruction::START, first_output,
- output_copy);
- }
-
- // Make sure we add a gap move for spilling (if we have not done
- // so already).
- if (!assigned) {
- range->SpillAtDefinition(local_zone(), gap_index, first_output);
- range->SetSpillStartIndex(gap_index);
- }
- }
- }
- }
-
- if (second != nullptr) {
- // Handle fixed input operands of second instruction.
- for (size_t i = 0; i < second->InputCount(); i++) {
- auto input = second->InputAt(i);
- if (input->IsImmediate()) continue; // Ignore immediates.
- auto cur_input = UnallocatedOperand::cast(input);
- if (cur_input->HasFixedPolicy()) {
- auto input_copy = cur_input->CopyUnconstrained(code_zone());
- bool is_tagged = HasTaggedValue(cur_input->virtual_register());
- AllocateFixed(cur_input, gap_index + 1, is_tagged);
- AddGapMove(gap_index, GapInstruction::END, input_copy, cur_input);
- }
- }
-
- // Handle "output same as input" for second instruction.
- for (size_t i = 0; i < second->OutputCount(); i++) {
- auto output = second->OutputAt(i);
- if (!output->IsUnallocated()) continue;
- auto second_output = UnallocatedOperand::cast(output);
- if (second_output->HasSameAsInputPolicy()) {
- DCHECK(i == 0); // Only valid for first output.
- UnallocatedOperand* cur_input =
- UnallocatedOperand::cast(second->InputAt(0));
- int output_vreg = second_output->virtual_register();
- int input_vreg = cur_input->virtual_register();
-
- auto input_copy = cur_input->CopyUnconstrained(code_zone());
- cur_input->set_virtual_register(second_output->virtual_register());
- AddGapMove(gap_index, GapInstruction::END, input_copy, cur_input);
-
- if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
- int index = gap_index + 1;
- Instruction* instr = InstructionAt(index);
- if (instr->HasPointerMap()) {
- instr->pointer_map()->RecordPointer(input_copy, code_zone());
- }
- } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
- // The input is assumed to immediately have a tagged representation,
- // before the pointer map can be used. I.e. the pointer map at the
- // instruction will include the output operand (whose value at the
- // beginning of the instruction is equal to the input operand). If
- // this is not desired, then the pointer map at this instruction needs
- // to be adjusted manually.
- }
- }
- }
- }
-}
-
-
-bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
- for (size_t i = 0; i < instr->OutputCount(); i++) {
- auto output = instr->OutputAt(i);
- if (output->IsRegister() && output->index() == index) return true;
- }
- return false;
-}
-
-
-bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
- int index) {
- for (size_t i = 0; i < instr->OutputCount(); i++) {
- auto output = instr->OutputAt(i);
- if (output->IsDoubleRegister() && output->index() == index) return true;
- }
- return false;
-}
-
-
-void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
- BitVector* live) {
- int block_start = block->first_instruction_index();
- auto block_start_position =
- LifetimePosition::FromInstructionIndex(block_start);
-
- for (int index = block->last_instruction_index(); index >= block_start;
- index--) {
- auto curr_position = LifetimePosition::FromInstructionIndex(index);
- auto instr = InstructionAt(index);
- DCHECK(instr != nullptr);
- if (instr->IsGapMoves()) {
- // Process the moves of the gap instruction, making their sources live.
- auto gap = code()->GapAt(index);
- const GapInstruction::InnerPosition kPositions[] = {
- GapInstruction::END, GapInstruction::START};
- for (auto position : kPositions) {
- auto move = gap->GetParallelMove(position);
- if (move == nullptr) continue;
- if (position == GapInstruction::END) {
- curr_position = curr_position.InstructionEnd();
- } else {
- curr_position = curr_position.InstructionStart();
- }
- auto move_ops = move->move_operands();
- for (auto cur = move_ops->begin(); cur != move_ops->end(); ++cur) {
- auto from = cur->source();
- auto to = cur->destination();
- auto hint = to;
- if (to->IsUnallocated()) {
- int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
- auto to_range = LiveRangeFor(to_vreg);
- if (to_range->is_phi()) {
- DCHECK(!FLAG_turbo_delay_ssa_decon);
- if (to_range->is_non_loop_phi()) {
- hint = to_range->current_hint_operand();
- }
- } else {
- if (live->Contains(to_vreg)) {
- Define(curr_position, to, from);
- live->Remove(to_vreg);
- } else {
- cur->Eliminate();
- continue;
- }
- }
- } else {
- Define(curr_position, to, from);
- }
- Use(block_start_position, curr_position, from, hint);
- if (from->IsUnallocated()) {
- live->Add(UnallocatedOperand::cast(from)->virtual_register());
- }
+void SpillSlotLocator::LocateSpillSlots() {
+ const InstructionSequence* code = data()->code();
+ for (TopLevelLiveRange* range : data()->live_ranges()) {
+ if (range == nullptr || range->IsEmpty()) continue;
+ // We care only about ranges which spill in the frame.
+ if (!range->HasSpillRange()) continue;
+ if (range->IsSpilledOnlyInDeferredBlocks()) {
+ for (LiveRange* child = range; child != nullptr; child = child->next()) {
+ if (child->spilled()) {
+ code->GetInstructionBlock(child->Start().ToInstructionIndex())
+ ->mark_needs_frame();
}
}
} else {
- // Process output, inputs, and temps of this non-gap instruction.
- for (size_t i = 0; i < instr->OutputCount(); i++) {
- auto output = instr->OutputAt(i);
- if (output->IsUnallocated()) {
- int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
- live->Remove(out_vreg);
- } else if (output->IsConstant()) {
- int out_vreg = output->index();
- live->Remove(out_vreg);
- }
- Define(curr_position, output, nullptr);
+ TopLevelLiveRange::SpillMoveInsertionList* spills =
+ range->spill_move_insertion_locations();
+ DCHECK_NOT_NULL(spills);
+ for (; spills != nullptr; spills = spills->next) {
+ code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
}
+ }
+ }
+}
- if (instr->ClobbersRegisters()) {
- for (int i = 0; i < config()->num_general_registers(); ++i) {
- if (!IsOutputRegisterOf(instr, i)) {
- auto range = FixedLiveRangeFor(i);
- range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
- local_zone());
- }
- }
+
+OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
+
+
+void OperandAssigner::AssignSpillSlots() {
+ ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
+ // Merge disjoint spill ranges
+ for (size_t i = 0; i < spill_ranges.size(); ++i) {
+ SpillRange* range = spill_ranges[i];
+ if (range == nullptr) continue;
+ if (range->IsEmpty()) continue;
+ for (size_t j = i + 1; j < spill_ranges.size(); ++j) {
+ SpillRange* other = spill_ranges[j];
+ if (other != nullptr && !other->IsEmpty()) {
+ range->TryMerge(other);
}
+ }
+ }
+ // Allocate slots for the merged spill ranges.
+ for (SpillRange* range : spill_ranges) {
+ if (range == nullptr || range->IsEmpty()) continue;
+ // Allocate a new operand referring to the spill slot.
+ if (!range->HasSlot()) {
+ int byte_width = range->ByteWidth();
+ int index = data()->frame()->AllocateSpillSlot(byte_width);
+ range->set_assigned_slot(index);
+ }
+ }
+}
- if (instr->ClobbersDoubleRegisters()) {
- for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
- if (!IsOutputDoubleRegisterOf(instr, i)) {
- auto range = FixedDoubleLiveRangeFor(i);
- range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
- local_zone());
- }
- }
+
+void OperandAssigner::CommitAssignment() {
+ for (TopLevelLiveRange* top_range : data()->live_ranges()) {
+ if (top_range == nullptr || top_range->IsEmpty()) continue;
+ InstructionOperand spill_operand;
+ if (top_range->HasSpillOperand()) {
+ spill_operand = *top_range->TopLevel()->GetSpillOperand();
+ } else if (top_range->TopLevel()->HasSpillRange()) {
+ spill_operand = top_range->TopLevel()->GetSpillRangeOperand();
+ }
+ if (top_range->is_phi()) {
+ data()->GetPhiMapValueFor(top_range)->CommitAssignment(
+ top_range->GetAssignedOperand());
+ }
+ for (LiveRange* range = top_range; range != nullptr;
+ range = range->next()) {
+ InstructionOperand assigned = range->GetAssignedOperand();
+ range->ConvertUsesToOperand(assigned, spill_operand);
+ }
+
+ if (!spill_operand.IsInvalid()) {
+ // If this top level range has a child spilled in a deferred block, we use
+ // the range and control flow connection mechanism instead of spilling at
+ // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
+ // phases. Normally, when we spill at definition, we do not insert a
+ // connecting move when a successor child range is spilled - because the
+ // spilled range picks up its value from the slot which was assigned at
+ // definition. For ranges that are determined to spill only in deferred
+ // blocks, we let ConnectLiveRanges and ResolveControlFlow insert such
+ // moves between ranges. Because of how the ranges are split around
+ // deferred blocks, this amounts to spilling and filling inside such
+ // blocks.
+ if (!top_range->TryCommitSpillInDeferredBlock(data()->code(),
+ spill_operand)) {
+ // Spill at definition if the range isn't spilled only in deferred
+ // blocks.
+ top_range->CommitSpillMoves(
+ data()->code(), spill_operand,
+ top_range->has_slot_use() || top_range->spilled());
}
+ }
+ }
+}
- for (size_t i = 0; i < instr->InputCount(); i++) {
- auto input = instr->InputAt(i);
- if (input->IsImmediate()) continue; // Ignore immediates.
- LifetimePosition use_pos;
- if (input->IsUnallocated() &&
- UnallocatedOperand::cast(input)->IsUsedAtStart()) {
- use_pos = curr_position;
+
+ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
+ : data_(data) {}
+
+
+bool ReferenceMapPopulator::SafePointsAreInOrder() const {
+ int safe_point = 0;
+ for (ReferenceMap* map : *data()->code()->reference_maps()) {
+ if (safe_point > map->instruction_position()) return false;
+ safe_point = map->instruction_position();
+ }
+ return true;
+}
+
+
+void ReferenceMapPopulator::PopulateReferenceMaps() {
+ DCHECK(SafePointsAreInOrder());
+ // Map all delayed references.
+ for (RegisterAllocationData::DelayedReference& delayed_reference :
+ data()->delayed_references()) {
+ delayed_reference.map->RecordReference(
+ AllocatedOperand::cast(*delayed_reference.operand));
+ }
+ // Iterate over all safe point positions and record a pointer
+ // for all spilled live ranges at this point.
+ int last_range_start = 0;
+ const ReferenceMapDeque* reference_maps = data()->code()->reference_maps();
+ ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
+ for (TopLevelLiveRange* range : data()->live_ranges()) {
+ if (range == nullptr) continue;
+ // Skip non-reference values.
+ if (!data()->IsReference(range)) continue;
+ // Skip empty live ranges.
+ if (range->IsEmpty()) continue;
+ if (range->has_preassigned_slot()) continue;
+
+ // Find the extent of the range and its children.
+ int start = range->Start().ToInstructionIndex();
+ int end = 0;
+ for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
+ LifetimePosition this_end = cur->End();
+ if (this_end.ToInstructionIndex() > end)
+ end = this_end.ToInstructionIndex();
+ DCHECK(cur->Start().ToInstructionIndex() >= start);
+ }
+
+ // Most of the ranges are in order, but not all. Keep an eye on when they
+ // step backwards and reset the first_it so we don't miss any safe points.
+ if (start < last_range_start) first_it = reference_maps->begin();
+ last_range_start = start;
+
+ // Step across all the safe points that are before the start of this range,
+ // recording how far we step in order to save doing this for the next range.
+ for (; first_it != reference_maps->end(); ++first_it) {
+ ReferenceMap* map = *first_it;
+ if (map->instruction_position() >= start) break;
+ }
+
+ InstructionOperand spill_operand;
+ if (((range->HasSpillOperand() &&
+ !range->GetSpillOperand()->IsConstant()) ||
+ range->HasSpillRange())) {
+ if (range->HasSpillOperand()) {
+ spill_operand = *range->GetSpillOperand();
+ } else {
+ spill_operand = range->GetSpillRangeOperand();
+ }
+ DCHECK(spill_operand.IsStackSlot());
+ DCHECK_EQ(MachineRepresentation::kTagged,
+ AllocatedOperand::cast(spill_operand).representation());
+ }
+
+ LiveRange* cur = range;
+ // Step through the safe points to see whether they are in the range.
+ for (auto it = first_it; it != reference_maps->end(); ++it) {
+ ReferenceMap* map = *it;
+ int safe_point = map->instruction_position();
+
+ // The safe points are sorted so we can stop searching here.
+ if (safe_point - 1 > end) break;
+
+ // Advance to the next active range that covers the current
+ // safe point position.
+ LifetimePosition safe_point_pos =
+ LifetimePosition::InstructionFromInstructionIndex(safe_point);
+
+ // Search for the child range (cur) that covers safe_point_pos. If we
+ // don't find it before the children pass safe_point_pos, keep cur at
+ // the last child, because the next safe_point_pos may be covered by cur.
+ // This may happen if cur has more than one interval, and the current
+ // safe_point_pos is in between intervals.
+ // For that reason, cur may be at most the last child.
+ DCHECK_NOT_NULL(cur);
+ DCHECK(safe_point_pos >= cur->Start() || range == cur);
+ bool found = false;
+ while (!found) {
+ if (cur->Covers(safe_point_pos)) {
+ found = true;
} else {
- use_pos = curr_position.InstructionEnd();
- }
-
- Use(block_start_position, use_pos, input, nullptr);
- if (input->IsUnallocated()) {
- live->Add(UnallocatedOperand::cast(input)->virtual_register());
- }
- }
-
- for (size_t i = 0; i < instr->TempCount(); i++) {
- auto temp = instr->TempAt(i);
- if (instr->ClobbersTemps()) {
- if (temp->IsRegister()) continue;
- if (temp->IsUnallocated()) {
- UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
- if (temp_unalloc->HasFixedPolicy()) {
- continue;
- }
+ LiveRange* next = cur->next();
+ if (next == nullptr || next->Start() > safe_point_pos) {
+ break;
}
- }
- Use(block_start_position, curr_position.InstructionEnd(), temp,
- nullptr);
- Define(curr_position, temp, nullptr);
- }
- }
- }
-}
-
-
-void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
- for (auto phi : block->phis()) {
- if (FLAG_turbo_reuse_spill_slots) {
- auto res = phi_map_.insert(
- std::make_pair(phi->virtual_register(), PhiMapValue(phi, block)));
- DCHECK(res.second);
- USE(res);
- }
- auto output = phi->output();
- int phi_vreg = phi->virtual_register();
- if (!FLAG_turbo_delay_ssa_decon) {
- for (size_t i = 0; i < phi->operands().size(); ++i) {
- InstructionBlock* cur_block =
- code()->InstructionBlockAt(block->predecessors()[i]);
- AddGapMove(cur_block->last_instruction_index() - 1, GapInstruction::END,
- phi->inputs()[i], output);
- DCHECK(!InstructionAt(cur_block->last_instruction_index())
- ->HasPointerMap());
- }
- }
- auto live_range = LiveRangeFor(phi_vreg);
- int gap_index = block->first_instruction_index();
- live_range->SpillAtDefinition(local_zone(), gap_index, output);
- live_range->SetSpillStartIndex(gap_index);
- // We use the phi-ness of some nodes in some later heuristics.
- live_range->set_is_phi(true);
- live_range->set_is_non_loop_phi(!block->IsLoopHeader());
- }
-}
-
-
-void RegisterAllocator::MeetRegisterConstraints() {
- for (auto block : code()->instruction_blocks()) {
- MeetRegisterConstraints(block);
- }
-}
-
-
-void RegisterAllocator::ResolvePhis() {
- // Process the blocks in reverse order.
- for (auto i = code()->instruction_blocks().rbegin();
- i != code()->instruction_blocks().rend(); ++i) {
- ResolvePhis(*i);
- }
-}
-
-
-ParallelMove* RegisterAllocator::GetConnectingParallelMove(
- LifetimePosition pos) {
- int index = pos.InstructionIndex();
- if (code()->IsGapAt(index)) {
- auto gap = code()->GapAt(index);
- return gap->GetOrCreateParallelMove(
- pos.IsInstructionStart() ? GapInstruction::START : GapInstruction::END,
- code_zone());
- }
- int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
- return code()->GapAt(gap_pos)->GetOrCreateParallelMove(
- (gap_pos < index) ? GapInstruction::AFTER : GapInstruction::BEFORE,
- code_zone());
-}
-
-
-const InstructionBlock* RegisterAllocator::GetInstructionBlock(
- LifetimePosition pos) {
- return code()->GetInstructionBlock(pos.InstructionIndex());
-}
-
-
-void RegisterAllocator::ConnectRanges() {
- for (auto first_range : live_ranges()) {
- if (first_range == nullptr || first_range->IsChild()) continue;
- auto second_range = first_range->next();
- while (second_range != nullptr) {
- auto pos = second_range->Start();
- if (!second_range->IsSpilled()) {
- // Add gap move if the two live ranges touch and there is no block
- // boundary.
- if (first_range->End().Value() == pos.Value()) {
- bool should_insert = true;
- if (IsBlockBoundary(pos)) {
- should_insert =
- CanEagerlyResolveControlFlow(GetInstructionBlock(pos));
- }
- if (should_insert) {
- auto move = GetConnectingParallelMove(pos);
- auto prev_operand = first_range->CreateAssignedOperand(code_zone());
- auto cur_operand = second_range->CreateAssignedOperand(code_zone());
- move->AddMove(prev_operand, cur_operand, code_zone());
- }
+ cur = next;
}
}
- first_range = second_range;
- second_range = second_range->next();
+
+ if (!found) {
+ continue;
+ }
+
+ // Check if the live range is spilled and the safe point is after
+ // the spill position.
+ int spill_index = range->IsSpilledOnlyInDeferredBlocks()
+ ? cur->Start().ToInstructionIndex()
+ : range->spill_start_index();
+
+ if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
+ TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
+ range->vreg(), spill_index, safe_point);
+ map->RecordReference(AllocatedOperand::cast(spill_operand));
+ }
+
+ if (!cur->spilled()) {
+ TRACE(
+ "Pointer in register for range %d:%d (start at %d) "
+ "at safe point %d\n",
+ range->vreg(), cur->relative_id(), cur->Start().value(),
+ safe_point);
+ InstructionOperand operand = cur->GetAssignedOperand();
+ DCHECK(!operand.IsStackSlot());
+ DCHECK_EQ(MachineRepresentation::kTagged,
+ AllocatedOperand::cast(operand).representation());
+ map->RecordReference(AllocatedOperand::cast(operand));
+ }
}
}
}
-bool RegisterAllocator::CanEagerlyResolveControlFlow(
- const InstructionBlock* block) const {
- if (block->PredecessorCount() != 1) return false;
- return block->predecessors()[0].IsNext(block->rpo_number());
-}
-
-
namespace {
class LiveRangeBound {
public:
- explicit LiveRangeBound(const LiveRange* range)
- : range_(range), start_(range->Start()), end_(range->End()) {
+ explicit LiveRangeBound(const LiveRange* range, bool skip)
+ : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
DCHECK(!range->IsEmpty());
}
bool CanCover(LifetimePosition position) {
- return start_.Value() <= position.Value() &&
- position.Value() < end_.Value();
+ return start_ <= position && position < end_;
}
const LiveRange* const range_;
const LifetimePosition start_;
const LifetimePosition end_;
+ const bool skip_;
private:
DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
@@ -1490,14 +3223,17 @@
bool ShouldInitialize() { return start_ == nullptr; }
- void Initialize(Zone* zone, const LiveRange* const range) {
- size_t length = 0;
- for (auto i = range; i != nullptr; i = i->next()) length++;
- start_ = zone->NewArray<LiveRangeBound>(static_cast<int>(length));
- length_ = length;
- auto curr = start_;
- for (auto i = range; i != nullptr; i = i->next(), ++curr) {
- new (curr) LiveRangeBound(i);
+ void Initialize(Zone* zone, const TopLevelLiveRange* const range) {
+ length_ = range->GetChildCount();
+
+ start_ = zone->NewArray<LiveRangeBound>(length_);
+ LiveRangeBound* curr = start_;
+ // Normally, spilled ranges do not need connecting moves, because the spill
+ // location has been assigned at definition. For ranges spilled in deferred
+ // blocks, that is not the case, so we need to connect the spilled children.
+ bool spilled_in_blocks = range->IsSpilledOnlyInDeferredBlocks();
+ for (const LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
+ new (curr) LiveRangeBound(i, !spilled_in_blocks && i->spilled());
}
}
@@ -1507,9 +3243,9 @@
while (true) {
size_t current_index = left_index + (right_index - left_index) / 2;
DCHECK(right_index > current_index);
- auto bound = &start_[current_index];
- if (bound->start_.Value() <= position.Value()) {
- if (position.Value() < bound->end_.Value()) return bound;
+ LiveRangeBound* bound = &start_[current_index];
+ if (bound->start_ <= position) {
+ if (position < bound->end_) return bound;
DCHECK(left_index < current_index);
left_index = current_index;
} else {
@@ -1519,32 +3255,41 @@
}
LiveRangeBound* FindPred(const InstructionBlock* pred) {
- auto pred_end =
- LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+ LifetimePosition pred_end =
+ LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
return Find(pred_end);
}
LiveRangeBound* FindSucc(const InstructionBlock* succ) {
- auto succ_start =
- LifetimePosition::FromInstructionIndex(succ->first_instruction_index());
+ LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
+ succ->first_instruction_index());
return Find(succ_start);
}
- void Find(const InstructionBlock* block, const InstructionBlock* pred,
- FindResult* result) const {
- auto pred_end =
- LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
- auto bound = Find(pred_end);
+ bool FindConnectableSubranges(const InstructionBlock* block,
+ const InstructionBlock* pred,
+ FindResult* result) const {
+ LifetimePosition pred_end =
+ LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
+ LiveRangeBound* bound = Find(pred_end);
result->pred_cover_ = bound->range_;
- auto cur_start = LifetimePosition::FromInstructionIndex(
+ LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
- // Common case.
+
if (bound->CanCover(cur_start)) {
- result->cur_cover_ = bound->range_;
- return;
+ // Both blocks are covered by the same range, so there is nothing to
+ // connect.
+ return false;
}
- result->cur_cover_ = Find(cur_start)->range_;
+ bound = Find(cur_start);
+ if (bound->skip_) {
+ return false;
+ }
+ result->cur_cover_ = bound->range_;
DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
+ return (result->cur_cover_ != result->pred_cover_);
}
private:
@@ -1557,11 +3302,11 @@
class LiveRangeFinder {
public:
- explicit LiveRangeFinder(const RegisterAllocator& allocator)
- : allocator_(allocator),
- bounds_length_(static_cast<int>(allocator.live_ranges().size())),
- bounds_(allocator.local_zone()->NewArray<LiveRangeBoundArray>(
- bounds_length_)) {
+ explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
+ : data_(data),
+ bounds_length_(static_cast<int>(data_->live_ranges().size())),
+ bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
+ zone_(zone) {
for (int i = 0; i < bounds_length_; ++i) {
new (&bounds_[i]) LiveRangeBoundArray();
}
@@ -1569,65 +3314,81 @@
LiveRangeBoundArray* ArrayFor(int operand_index) {
DCHECK(operand_index < bounds_length_);
- auto range = allocator_.live_ranges()[operand_index];
+ TopLevelLiveRange* range = data_->live_ranges()[operand_index];
DCHECK(range != nullptr && !range->IsEmpty());
- auto array = &bounds_[operand_index];
+ LiveRangeBoundArray* array = &bounds_[operand_index];
if (array->ShouldInitialize()) {
- array->Initialize(allocator_.local_zone(), range);
+ array->Initialize(zone_, range);
}
return array;
}
private:
- const RegisterAllocator& allocator_;
+ const RegisterAllocationData* const data_;
const int bounds_length_;
LiveRangeBoundArray* const bounds_;
+ Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
};
+
+typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
+
+
+struct DelayedInsertionMapCompare {
+ bool operator()(const DelayedInsertionMapKey& a,
+ const DelayedInsertionMapKey& b) const {
+ if (a.first == b.first) {
+ return a.second.Compare(b.second);
+ }
+ return a.first < b.first;
+ }
+};
+
+
+typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
+ DelayedInsertionMapCompare> DelayedInsertionMap;
+
} // namespace
-void RegisterAllocator::ResolveControlFlow() {
+LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
+ : data_(data) {}
+
+
+bool LiveRangeConnector::CanEagerlyResolveControlFlow(
+ const InstructionBlock* block) const {
+ if (block->PredecessorCount() != 1) return false;
+ return block->predecessors()[0].IsNext(block->rpo_number());
+}
+
+
+void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
// Lazily linearize live ranges in memory for fast lookup.
- LiveRangeFinder finder(*this);
- for (auto block : code()->instruction_blocks()) {
+ LiveRangeFinder finder(data(), local_zone);
+ ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
+ for (const InstructionBlock* block : code()->instruction_blocks()) {
if (CanEagerlyResolveControlFlow(block)) continue;
- if (FLAG_turbo_delay_ssa_decon) {
- // resolve phis
- for (auto phi : block->phis()) {
- auto* block_bound =
- finder.ArrayFor(phi->virtual_register())->FindSucc(block);
- auto phi_output =
- block_bound->range_->CreateAssignedOperand(code_zone());
- phi->output()->ConvertTo(phi_output->kind(), phi_output->index());
- size_t pred_index = 0;
- for (auto pred : block->predecessors()) {
- const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
- auto* pred_bound = finder.ArrayFor(phi->operands()[pred_index])
- ->FindPred(pred_block);
- auto pred_op = pred_bound->range_->CreateAssignedOperand(code_zone());
- phi->inputs()[pred_index] = pred_op;
- ResolveControlFlow(block, phi_output, pred_block, pred_op);
- pred_index++;
- }
- }
- }
- auto live = live_in_sets_[block->rpo_number().ToInt()];
+ BitVector* live = live_in_sets[block->rpo_number().ToInt()];
BitVector::Iterator iterator(live);
while (!iterator.Done()) {
- auto* array = finder.ArrayFor(iterator.Current());
- for (auto pred : block->predecessors()) {
+ LiveRangeBoundArray* array = finder.ArrayFor(iterator.Current());
+ for (const RpoNumber& pred : block->predecessors()) {
FindResult result;
- const auto* pred_block = code()->InstructionBlockAt(pred);
- array->Find(block, pred_block, &result);
- if (result.cur_cover_ == result.pred_cover_ ||
- result.cur_cover_->IsSpilled())
+ const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
+ if (!array->FindConnectableSubranges(block, pred_block, &result)) {
continue;
- auto pred_op = result.pred_cover_->CreateAssignedOperand(code_zone());
- auto cur_op = result.cur_cover_->CreateAssignedOperand(code_zone());
- ResolveControlFlow(block, cur_op, pred_block, pred_op);
+ }
+ InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
+ InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
+ if (pred_op.Equals(cur_op)) continue;
+ int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
+ USE(move_loc);
+ DCHECK_IMPLIES(
+ result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
+ !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()),
+ code()->GetInstructionBlock(move_loc)->IsDeferred());
}
iterator.Advance();
}
@@ -1635,914 +3396,113 @@
}
-void RegisterAllocator::ResolveControlFlow(const InstructionBlock* block,
- InstructionOperand* cur_op,
+int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
+ const InstructionOperand& cur_op,
const InstructionBlock* pred,
- InstructionOperand* pred_op) {
- if (pred_op->Equals(cur_op)) return;
+ const InstructionOperand& pred_op) {
+ DCHECK(!pred_op.Equals(cur_op));
int gap_index;
- GapInstruction::InnerPosition position;
+ Instruction::GapPosition position;
if (block->PredecessorCount() == 1) {
gap_index = block->first_instruction_index();
- position = GapInstruction::START;
+ position = Instruction::START;
} else {
DCHECK(pred->SuccessorCount() == 1);
- DCHECK(!InstructionAt(pred->last_instruction_index())->HasPointerMap());
- gap_index = pred->last_instruction_index() - 1;
- position = GapInstruction::END;
+ DCHECK(!code()
+ ->InstructionAt(pred->last_instruction_index())
+ ->HasReferenceMap());
+ gap_index = pred->last_instruction_index();
+ position = Instruction::END;
}
- AddGapMove(gap_index, position, pred_op, cur_op);
+ data()->AddGapMove(gap_index, position, pred_op, cur_op);
+ return gap_index;
}
-void RegisterAllocator::BuildLiveRanges() {
- // Process the blocks in reverse order.
- for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
- --block_id) {
- auto block =
- code()->InstructionBlockAt(BasicBlock::RpoNumber::FromInt(block_id));
- auto live = ComputeLiveOut(block);
- // Initially consider all live_out values live for the entire block. We
- // will shorten these intervals if necessary.
- AddInitialIntervals(block, live);
-
- // Process the instructions in reverse order, generating and killing
- // live values.
- ProcessInstructions(block, live);
- // All phi output operands are killed by this block.
- for (auto phi : block->phis()) {
- // The live range interval already ends at the first instruction of the
- // block.
- int phi_vreg = phi->virtual_register();
- live->Remove(phi_vreg);
- if (!FLAG_turbo_delay_ssa_decon) {
- InstructionOperand* hint = nullptr;
- InstructionOperand* phi_operand = nullptr;
- auto gap =
- GetLastGap(code()->InstructionBlockAt(block->predecessors()[0]));
- auto move =
- gap->GetOrCreateParallelMove(GapInstruction::END, code_zone());
- for (int j = 0; j < move->move_operands()->length(); ++j) {
- auto to = move->move_operands()->at(j).destination();
- if (to->IsUnallocated() &&
- UnallocatedOperand::cast(to)->virtual_register() == phi_vreg) {
- hint = move->move_operands()->at(j).source();
- phi_operand = to;
- break;
- }
- }
- DCHECK(hint != nullptr);
- auto block_start = LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
- Define(block_start, phi_operand, hint);
- }
- }
-
- // Now live is live_in for this block except not including values live
- // out on backward successor edges.
- live_in_sets_[block_id] = live;
-
- if (block->IsLoopHeader()) {
- // Add a live range stretching from the first loop instruction to the last
- // for each value live on entry to the header.
- BitVector::Iterator iterator(live);
- auto start = LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
- auto end = LifetimePosition::FromInstructionIndex(
- code()->LastLoopInstructionIndex(block)).NextInstruction();
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
- auto range = LiveRangeFor(operand_index);
- range->EnsureInterval(start, end, local_zone());
- iterator.Advance();
- }
- // Insert all values into the live in sets of all blocks in the loop.
- for (int i = block->rpo_number().ToInt() + 1;
- i < block->loop_end().ToInt(); ++i) {
- live_in_sets_[i]->Union(*live);
- }
- }
- }
-
- for (auto range : live_ranges()) {
- if (range == nullptr) continue;
- range->kind_ = RequiredRegisterKind(range->id());
- // TODO(bmeurer): This is a horrible hack to make sure that for constant
- // live ranges, every use requires the constant to be in a register.
- // Without this hack, all uses with "any" policy would get the constant
- // operand assigned.
- if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
- for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next_) {
- pos->register_beneficial_ = true;
- // TODO(dcarney): should the else case assert requires_reg_ == false?
- // Can't mark phis as needing a register.
- if (!code()
- ->InstructionAt(pos->pos().InstructionIndex())
- ->IsGapMoves()) {
- pos->requires_reg_ = true;
- }
- }
- }
- }
-}
-
-
-bool RegisterAllocator::ExistsUseWithoutDefinition() {
- bool found = false;
- BitVector::Iterator iterator(live_in_sets_[0]);
- while (!iterator.Done()) {
- found = true;
- int operand_index = iterator.Current();
- PrintF("Register allocator error: live v%d reached first block.\n",
- operand_index);
- LiveRange* range = LiveRangeFor(operand_index);
- PrintF(" (first use is at %d)\n", range->first_pos()->pos().Value());
- if (debug_name() == nullptr) {
- PrintF("\n");
- } else {
- PrintF(" (function: %s)\n", debug_name());
- }
- iterator.Advance();
- }
- return found;
-}
-
-
-bool RegisterAllocator::SafePointsAreInOrder() const {
- int safe_point = 0;
- for (auto map : *code()->pointer_maps()) {
- if (safe_point > map->instruction_position()) return false;
- safe_point = map->instruction_position();
- }
- return true;
-}
-
-
-void RegisterAllocator::PopulatePointerMaps() {
- DCHECK(SafePointsAreInOrder());
-
- // Iterate over all safe point positions and record a pointer
- // for all spilled live ranges at this point.
- int last_range_start = 0;
- auto pointer_maps = code()->pointer_maps();
- PointerMapDeque::const_iterator first_it = pointer_maps->begin();
- for (LiveRange* range : live_ranges()) {
- if (range == nullptr) continue;
- // Iterate over the first parts of multi-part live ranges.
- if (range->IsChild()) continue;
- // Skip non-reference values.
- if (!HasTaggedValue(range->id())) continue;
- // Skip empty live ranges.
- if (range->IsEmpty()) continue;
-
- // Find the extent of the range and its children.
- int start = range->Start().InstructionIndex();
- int end = 0;
- for (auto cur = range; cur != nullptr; cur = cur->next()) {
- auto this_end = cur->End();
- if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
- DCHECK(cur->Start().InstructionIndex() >= start);
- }
-
- // Most of the ranges are in order, but not all. Keep an eye on when they
- // step backwards and reset the first_it so we don't miss any safe points.
- if (start < last_range_start) first_it = pointer_maps->begin();
- last_range_start = start;
-
- // Step across all the safe points that are before the start of this range,
- // recording how far we step in order to save doing this for the next range.
- for (; first_it != pointer_maps->end(); ++first_it) {
- auto map = *first_it;
- if (map->instruction_position() >= start) break;
- }
-
- // Step through the safe points to see whether they are in the range.
- for (auto it = first_it; it != pointer_maps->end(); ++it) {
- auto map = *it;
- int safe_point = map->instruction_position();
-
- // The safe points are sorted so we can stop searching here.
- if (safe_point - 1 > end) break;
-
- // Advance to the next active range that covers the current
- // safe point position.
- auto safe_point_pos = LifetimePosition::FromInstructionIndex(safe_point);
- auto cur = range;
- while (cur != nullptr && !cur->Covers(safe_point_pos)) {
- cur = cur->next();
- }
- if (cur == nullptr) continue;
-
- // Check if the live range is spilled and the safe point is after
- // the spill position.
- if (range->HasSpillOperand() &&
- safe_point >= range->spill_start_index() &&
- !range->GetSpillOperand()->IsConstant()) {
- TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
- range->id(), range->spill_start_index(), safe_point);
- map->RecordPointer(range->GetSpillOperand(), code_zone());
- }
-
- if (!cur->IsSpilled()) {
- TraceAlloc(
- "Pointer in register for range %d (start at %d) "
- "at safe point %d\n",
- cur->id(), cur->Start().Value(), safe_point);
- InstructionOperand* operand = cur->CreateAssignedOperand(code_zone());
- DCHECK(!operand->IsStackSlot());
- map->RecordPointer(operand, code_zone());
- }
- }
- }
-}
-
-
-void RegisterAllocator::AllocateGeneralRegisters() {
- num_registers_ = config()->num_general_registers();
- mode_ = GENERAL_REGISTERS;
- AllocateRegisters();
-}
-
-
-void RegisterAllocator::AllocateDoubleRegisters() {
- num_registers_ = config()->num_aliased_double_registers();
- mode_ = DOUBLE_REGISTERS;
- AllocateRegisters();
-}
-
-
-void RegisterAllocator::AllocateRegisters() {
- DCHECK(unhandled_live_ranges().empty());
-
- for (auto range : live_ranges()) {
- if (range == nullptr) continue;
- if (range->Kind() == mode_) {
- AddToUnhandledUnsorted(range);
- }
- }
- SortUnhandled();
- DCHECK(UnhandledIsSorted());
-
- DCHECK(reusable_slots().empty());
- DCHECK(active_live_ranges().empty());
- DCHECK(inactive_live_ranges().empty());
-
- if (mode_ == DOUBLE_REGISTERS) {
- for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
- auto current = fixed_double_live_ranges()[i];
- if (current != nullptr) {
- AddToInactive(current);
- }
- }
- } else {
- DCHECK(mode_ == GENERAL_REGISTERS);
- for (auto current : fixed_live_ranges()) {
- if (current != nullptr) {
- AddToInactive(current);
- }
- }
- }
-
- while (!unhandled_live_ranges().empty()) {
- DCHECK(UnhandledIsSorted());
- auto current = unhandled_live_ranges().back();
- unhandled_live_ranges().pop_back();
- DCHECK(UnhandledIsSorted());
- auto position = current->Start();
-#ifdef DEBUG
- allocation_finger_ = position;
-#endif
- TraceAlloc("Processing interval %d start=%d\n", current->id(),
- position.Value());
-
- if (!current->HasNoSpillType()) {
- TraceAlloc("Live range %d already has a spill operand\n", current->id());
- auto next_pos = position;
- if (code()->IsGapAt(next_pos.InstructionIndex())) {
- next_pos = next_pos.NextInstruction();
- }
- auto pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
- // If the range already has a spill operand and it doesn't need a
- // register immediately, split it and spill the first part of the range.
- if (pos == nullptr) {
- Spill(current);
- continue;
- } else if (pos->pos().Value() >
- current->Start().NextInstruction().Value()) {
- // Do not spill live range eagerly if use position that can benefit from
- // the register is too close to the start of live range.
- SpillBetween(current, current->Start(), pos->pos());
- if (!AllocationOk()) return;
- DCHECK(UnhandledIsSorted());
+void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
+ DelayedInsertionMap delayed_insertion_map(local_zone);
+ for (TopLevelLiveRange* top_range : data()->live_ranges()) {
+ if (top_range == nullptr) continue;
+ bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks();
+ LiveRange* first_range = top_range;
+ for (LiveRange *second_range = first_range->next(); second_range != nullptr;
+ first_range = second_range, second_range = second_range->next()) {
+ LifetimePosition pos = second_range->Start();
+ // Add gap move if the two live ranges touch and there is no block
+ // boundary.
+ if (!connect_spilled && second_range->spilled()) continue;
+ if (first_range->End() != pos) continue;
+ if (data()->IsBlockBoundary(pos) &&
+ !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
continue;
}
- }
-
- if (FLAG_turbo_reuse_spill_slots) {
- if (TryReuseSpillForPhi(current)) {
- continue;
- }
- if (!AllocationOk()) return;
- }
-
- for (size_t i = 0; i < active_live_ranges().size(); ++i) {
- auto cur_active = active_live_ranges()[i];
- if (cur_active->End().Value() <= position.Value()) {
- ActiveToHandled(cur_active);
- --i; // The live range was removed from the list of active live ranges.
- } else if (!cur_active->Covers(position)) {
- ActiveToInactive(cur_active);
- --i; // The live range was removed from the list of active live ranges.
- }
- }
-
- for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
- auto cur_inactive = inactive_live_ranges()[i];
- if (cur_inactive->End().Value() <= position.Value()) {
- InactiveToHandled(cur_inactive);
- --i; // Live range was removed from the list of inactive live ranges.
- } else if (cur_inactive->Covers(position)) {
- InactiveToActive(cur_inactive);
- --i; // Live range was removed from the list of inactive live ranges.
- }
- }
-
- DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());
-
- bool result = TryAllocateFreeReg(current);
- if (!AllocationOk()) return;
-
- if (!result) AllocateBlockedReg(current);
- if (!AllocationOk()) return;
-
- if (current->HasRegisterAssigned()) {
- AddToActive(current);
- }
- }
-
- reusable_slots().clear();
- active_live_ranges().clear();
- inactive_live_ranges().clear();
-}
-
-
-const char* RegisterAllocator::RegisterName(int allocation_index) {
- if (mode_ == GENERAL_REGISTERS) {
- return config()->general_register_name(allocation_index);
- } else {
- return config()->double_register_name(allocation_index);
- }
-}
-
-
-bool RegisterAllocator::HasTaggedValue(int virtual_register) const {
- return code()->IsReference(virtual_register);
-}
-
-
-RegisterKind RegisterAllocator::RequiredRegisterKind(
- int virtual_register) const {
- return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS
- : GENERAL_REGISTERS;
-}
-
-
-void RegisterAllocator::AddToActive(LiveRange* range) {
- TraceAlloc("Add live range %d to active\n", range->id());
- active_live_ranges().push_back(range);
-}
-
-
-void RegisterAllocator::AddToInactive(LiveRange* range) {
- TraceAlloc("Add live range %d to inactive\n", range->id());
- inactive_live_ranges().push_back(range);
-}
-
-
-void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
- if (range == nullptr || range->IsEmpty()) return;
- DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
- DCHECK(allocation_finger_.Value() <= range->Start().Value());
- for (int i = static_cast<int>(unhandled_live_ranges().size() - 1); i >= 0;
- --i) {
- auto cur_range = unhandled_live_ranges().at(i);
- if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
- TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
- auto it = unhandled_live_ranges().begin() + (i + 1);
- unhandled_live_ranges().insert(it, range);
- DCHECK(UnhandledIsSorted());
- return;
- }
- TraceAlloc("Add live range %d to unhandled at start\n", range->id());
- unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
- DCHECK(UnhandledIsSorted());
-}
-
-
-void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
- if (range == nullptr || range->IsEmpty()) return;
- DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
- TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
- unhandled_live_ranges().push_back(range);
-}
-
-
-static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
- DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a));
- if (a->ShouldBeAllocatedBefore(b)) return false;
- if (b->ShouldBeAllocatedBefore(a)) return true;
- return a->id() < b->id();
-}
-
-
-// Sort the unhandled live ranges so that the ranges to be processed first are
-// at the end of the array list. This is convenient for the register allocation
-// algorithm because it is efficient to remove elements from the end.
-void RegisterAllocator::SortUnhandled() {
- TraceAlloc("Sort unhandled\n");
- std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
- &UnhandledSortHelper);
-}
-
-
-bool RegisterAllocator::UnhandledIsSorted() {
- size_t len = unhandled_live_ranges().size();
- for (size_t i = 1; i < len; i++) {
- auto a = unhandled_live_ranges().at(i - 1);
- auto b = unhandled_live_ranges().at(i);
- if (a->Start().Value() < b->Start().Value()) return false;
- }
- return true;
-}
-
-
-void RegisterAllocator::FreeSpillSlot(LiveRange* range) {
- DCHECK(!FLAG_turbo_reuse_spill_slots);
- // Check that we are the last range.
- if (range->next() != nullptr) return;
- if (!range->TopLevel()->HasSpillOperand()) return;
- auto spill_operand = range->TopLevel()->GetSpillOperand();
- if (spill_operand->IsConstant()) return;
- if (spill_operand->index() >= 0) {
- reusable_slots().push_back(range);
- }
-}
-
-
-InstructionOperand* RegisterAllocator::TryReuseSpillSlot(LiveRange* range) {
- DCHECK(!FLAG_turbo_reuse_spill_slots);
- if (reusable_slots().empty()) return nullptr;
- if (reusable_slots().front()->End().Value() >
- range->TopLevel()->Start().Value()) {
- return nullptr;
- }
- auto result = reusable_slots().front()->TopLevel()->GetSpillOperand();
- reusable_slots().erase(reusable_slots().begin());
- return result;
-}
-
-
-void RegisterAllocator::ActiveToHandled(LiveRange* range) {
- RemoveElement(&active_live_ranges(), range);
- TraceAlloc("Moving live range %d from active to handled\n", range->id());
- if (!FLAG_turbo_reuse_spill_slots) FreeSpillSlot(range);
-}
-
-
-void RegisterAllocator::ActiveToInactive(LiveRange* range) {
- RemoveElement(&active_live_ranges(), range);
- inactive_live_ranges().push_back(range);
- TraceAlloc("Moving live range %d from active to inactive\n", range->id());
-}
-
-
-void RegisterAllocator::InactiveToHandled(LiveRange* range) {
- RemoveElement(&inactive_live_ranges(), range);
- TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
- if (!FLAG_turbo_reuse_spill_slots) FreeSpillSlot(range);
-}
-
-
-void RegisterAllocator::InactiveToActive(LiveRange* range) {
- RemoveElement(&inactive_live_ranges(), range);
- active_live_ranges().push_back(range);
- TraceAlloc("Moving live range %d from inactive to active\n", range->id());
-}
-
-
-bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters];
-
- for (int i = 0; i < num_registers_; i++) {
- free_until_pos[i] = LifetimePosition::MaxPosition();
- }
-
- for (auto cur_active : active_live_ranges()) {
- free_until_pos[cur_active->assigned_register()] =
- LifetimePosition::FromInstructionIndex(0);
- }
-
- for (auto cur_inactive : inactive_live_ranges()) {
- DCHECK(cur_inactive->End().Value() > current->Start().Value());
- auto next_intersection = cur_inactive->FirstIntersection(current);
- if (!next_intersection.IsValid()) continue;
- int cur_reg = cur_inactive->assigned_register();
- free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
- }
-
- auto hint = current->FirstHint();
- if (hint != nullptr && (hint->IsRegister() || hint->IsDoubleRegister())) {
- int register_index = hint->index();
- TraceAlloc(
- "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
- RegisterName(register_index), free_until_pos[register_index].Value(),
- current->id(), current->End().Value());
-
- // The desired register is free until the end of the current live range.
- if (free_until_pos[register_index].Value() >= current->End().Value()) {
- TraceAlloc("Assigning preferred reg %s to live range %d\n",
- RegisterName(register_index), current->id());
- SetLiveRangeAssignedRegister(current, register_index);
- return true;
- }
- }
-
- // Find the register which stays free for the longest time.
- int reg = 0;
- for (int i = 1; i < RegisterCount(); ++i) {
- if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
- reg = i;
- }
- }
-
- auto pos = free_until_pos[reg];
-
- if (pos.Value() <= current->Start().Value()) {
- // All registers are blocked.
- return false;
- }
-
- if (pos.Value() < current->End().Value()) {
- // Register reg is available at the range start but becomes blocked before
- // the range end. Split current at position where it becomes blocked.
- auto tail = SplitRangeAt(current, pos);
- if (!AllocationOk()) return false;
- AddToUnhandledSorted(tail);
- }
-
- // Register reg is available at the range start and is free until
- // the range end.
- DCHECK(pos.Value() >= current->End().Value());
- TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg),
- current->id());
- SetLiveRangeAssignedRegister(current, reg);
-
- return true;
-}
-
-
-void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
- auto register_use = current->NextRegisterPosition(current->Start());
- if (register_use == nullptr) {
- // There is no use in the current live range that requires a register.
- // We can just spill it.
- Spill(current);
- return;
- }
-
- LifetimePosition use_pos[RegisterConfiguration::kMaxDoubleRegisters];
- LifetimePosition block_pos[RegisterConfiguration::kMaxDoubleRegisters];
-
- for (int i = 0; i < num_registers_; i++) {
- use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
- }
-
- for (auto range : active_live_ranges()) {
- int cur_reg = range->assigned_register();
- if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
- block_pos[cur_reg] = use_pos[cur_reg] =
- LifetimePosition::FromInstructionIndex(0);
- } else {
- auto next_use =
- range->NextUsePositionRegisterIsBeneficial(current->Start());
- if (next_use == nullptr) {
- use_pos[cur_reg] = range->End();
+ InstructionOperand prev_operand = first_range->GetAssignedOperand();
+ InstructionOperand cur_operand = second_range->GetAssignedOperand();
+ if (prev_operand.Equals(cur_operand)) continue;
+ bool delay_insertion = false;
+ Instruction::GapPosition gap_pos;
+ int gap_index = pos.ToInstructionIndex();
+ if (pos.IsGapPosition()) {
+ gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
} else {
- use_pos[cur_reg] = next_use->pos();
- }
- }
- }
-
- for (auto range : inactive_live_ranges()) {
- DCHECK(range->End().Value() > current->Start().Value());
- auto next_intersection = range->FirstIntersection(current);
- if (!next_intersection.IsValid()) continue;
- int cur_reg = range->assigned_register();
- if (range->IsFixed()) {
- block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
- use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
- } else {
- use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
- }
- }
-
- int reg = 0;
- for (int i = 1; i < RegisterCount(); ++i) {
- if (use_pos[i].Value() > use_pos[reg].Value()) {
- reg = i;
- }
- }
-
- auto pos = use_pos[reg];
-
- if (pos.Value() < register_use->pos().Value()) {
- // All registers are blocked before the first use that requires a register.
- // Spill starting part of live range up to that use.
- SpillBetween(current, current->Start(), register_use->pos());
- return;
- }
-
- if (block_pos[reg].Value() < current->End().Value()) {
- // Register becomes blocked before the current range end. Split before that
- // position.
- LiveRange* tail = SplitBetween(current, current->Start(),
- block_pos[reg].InstructionStart());
- if (!AllocationOk()) return;
- AddToUnhandledSorted(tail);
- }
-
- // Register reg is not blocked for the whole range.
- DCHECK(block_pos[reg].Value() >= current->End().Value());
- TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
- current->id());
- SetLiveRangeAssignedRegister(current, reg);
-
- // This register was not free. Thus we need to find and spill
- // parts of active and inactive live regions that use the same register
- // at the same lifetime positions as current.
- SplitAndSpillIntersecting(current);
-}
-
-
-static const InstructionBlock* GetContainingLoop(
- const InstructionSequence* sequence, const InstructionBlock* block) {
- auto index = block->loop_header();
- if (!index.IsValid()) return nullptr;
- return sequence->InstructionBlockAt(index);
-}
-
-
-LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
- LiveRange* range, LifetimePosition pos) {
- auto block = GetInstructionBlock(pos.InstructionStart());
- auto loop_header =
- block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
-
- if (loop_header == nullptr) return pos;
-
- auto prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
-
- while (loop_header != nullptr) {
- // We are going to spill live range inside the loop.
- // If possible try to move spilling position backwards to loop header.
- // This will reduce number of memory moves on the back edge.
- auto loop_start = LifetimePosition::FromInstructionIndex(
- loop_header->first_instruction_index());
-
- if (range->Covers(loop_start)) {
- if (prev_use == nullptr || prev_use->pos().Value() < loop_start.Value()) {
- // No register beneficial use inside the loop before the pos.
- pos = loop_start;
- }
- }
-
- // Try hoisting out to an outer loop.
- loop_header = GetContainingLoop(code(), loop_header);
- }
-
- return pos;
-}
-
-
-void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
- DCHECK(current->HasRegisterAssigned());
- int reg = current->assigned_register();
- auto split_pos = current->Start();
- for (size_t i = 0; i < active_live_ranges().size(); ++i) {
- auto range = active_live_ranges()[i];
- if (range->assigned_register() == reg) {
- auto next_pos = range->NextRegisterPosition(current->Start());
- auto spill_pos = FindOptimalSpillingPos(range, split_pos);
- if (next_pos == nullptr) {
- SpillAfter(range, spill_pos);
- } else {
- // When spilling between spill_pos and next_pos ensure that the range
- // remains spilled at least until the start of the current live range.
- // This guarantees that we will not introduce new unhandled ranges that
- // start before the current range as this violates allocation invariant
- // and will lead to an inconsistent state of active and inactive
- // live-ranges: ranges are allocated in order of their start positions,
- // ranges are retired from active/inactive when the start of the
- // current live-range is larger than their end.
- SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
- }
- if (!AllocationOk()) return;
- ActiveToHandled(range);
- --i;
- }
- }
-
- for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
- auto range = inactive_live_ranges()[i];
- DCHECK(range->End().Value() > current->Start().Value());
- if (range->assigned_register() == reg && !range->IsFixed()) {
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (next_intersection.IsValid()) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- if (next_pos == nullptr) {
- SpillAfter(range, split_pos);
+ if (pos.IsStart()) {
+ delay_insertion = true;
} else {
- next_intersection = Min(next_intersection, next_pos->pos());
- SpillBetween(range, split_pos, next_intersection);
+ gap_index++;
}
- if (!AllocationOk()) return;
- InactiveToHandled(range);
- --i;
+ gap_pos = delay_insertion ? Instruction::END : Instruction::START;
+ }
+ // Fills or spills for ranges spilled in deferred blocks must happen
+ // only in deferred blocks.
+ DCHECK_IMPLIES(
+ connect_spilled &&
+ !(prev_operand.IsAnyRegister() && cur_operand.IsAnyRegister()),
+ code()->GetInstructionBlock(gap_index)->IsDeferred());
+
+ ParallelMove* move =
+ code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
+ gap_pos, code_zone());
+ if (!delay_insertion) {
+ move->AddMove(prev_operand, cur_operand);
+ } else {
+ delayed_insertion_map.insert(
+ std::make_pair(std::make_pair(move, prev_operand), cur_operand));
}
}
}
-}
-
-
-bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
- return pos.IsInstructionStart() &&
- InstructionAt(pos.InstructionIndex())->IsBlockStart();
-}
-
-
-LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
- LifetimePosition pos) {
- DCHECK(!range->IsFixed());
- TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
-
- if (pos.Value() <= range->Start().Value()) return range;
-
- // We can't properly connect live ranges if the split occurred at the end
- // of a control instruction.
- DCHECK(pos.IsInstructionStart() ||
- !InstructionAt(pos.InstructionIndex())->IsControl());
-
- int vreg = GetVirtualRegister();
- if (!AllocationOk()) return nullptr;
- auto result = LiveRangeFor(vreg);
- range->SplitAt(pos, result, local_zone());
- return result;
-}
-
-
-LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
- DCHECK(!range->IsFixed());
- TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
- range->id(), start.Value(), end.Value());
-
- auto split_pos = FindOptimalSplitPos(start, end);
- DCHECK(split_pos.Value() >= start.Value());
- return SplitRangeAt(range, split_pos);
-}
-
-
-LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
- LifetimePosition end) {
- int start_instr = start.InstructionIndex();
- int end_instr = end.InstructionIndex();
- DCHECK(start_instr <= end_instr);
-
- // We have no choice
- if (start_instr == end_instr) return end;
-
- auto start_block = GetInstructionBlock(start);
- auto end_block = GetInstructionBlock(end);
-
- if (end_block == start_block) {
- // The interval is split in the same basic block. Split at the latest
- // possible position.
- return end;
- }
-
- auto block = end_block;
- // Find header of outermost loop.
- // TODO(titzer): fix redundancy below.
- while (GetContainingLoop(code(), block) != nullptr &&
- GetContainingLoop(code(), block)->rpo_number().ToInt() >
- start_block->rpo_number().ToInt()) {
- block = GetContainingLoop(code(), block);
- }
-
- // We did not find any suitable outer loop. Split at the latest possible
- // position unless end_block is a loop header itself.
- if (block == end_block && !end_block->IsLoopHeader()) return end;
-
- return LifetimePosition::FromInstructionIndex(
- block->first_instruction_index());
-}
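The outer-loop walk in FindOptimalSplitPos reads more easily on simplified types. The sketch below mirrors the logic above, including the end_block-is-a-loop-header case, but all names are placeholders rather than the V8 API:

    struct Block {
      int rpo_number;
      int first_instruction_index;
      bool is_loop_header;
      Block* containing_loop;  // header of the enclosing loop, or nullptr
    };

    // Split as late as possible, unless an enclosing loop starts after the
    // range's start block; then split at that loop's header so the spilled
    // part does not live across the loop's back edge.
    int OptimalSplitIndex(const Block* start_block, const Block* end_block,
                          int end_index) {
      const Block* block = end_block;
      while (block->containing_loop != nullptr &&
             block->containing_loop->rpo_number > start_block->rpo_number) {
        block = block->containing_loop;
      }
      if (block == end_block && !end_block->is_loop_header) return end_index;
      return block->first_instruction_index;
    }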
-
-
-void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
- auto second_part = SplitRangeAt(range, pos);
- if (!AllocationOk()) return;
- Spill(second_part);
-}
-
-
-void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
- LifetimePosition end) {
- SpillBetweenUntil(range, start, start, end);
-}
-
-
-void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
- LifetimePosition start,
- LifetimePosition until,
- LifetimePosition end) {
- CHECK(start.Value() < end.Value());
- auto second_part = SplitRangeAt(range, start);
- if (!AllocationOk()) return;
-
- if (second_part->Start().Value() < end.Value()) {
- // The split result intersects with [start, end[.
- // Split it at position between ]start+1, end[, spill the middle part
- // and put the rest on the unhandled list.
- auto third_part = SplitBetween(
- second_part, Max(second_part->Start().InstructionEnd(), until),
- end.PrevInstruction().InstructionEnd());
- if (!AllocationOk()) return;
-
- DCHECK(third_part != second_part);
-
- Spill(second_part);
- AddToUnhandledSorted(third_part);
- } else {
- // The split result does not intersect with [start, end[.
- // Nothing to spill. Just put the whole range on the unhandled list.
- AddToUnhandledSorted(second_part);
- }
-}
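On toy half-open integer ranges, the three-way split that SpillBetweenUntil performs looks roughly like the sketch below (hypothetical types; the real code clamps to instruction-boundary LifetimePositions rather than raw ints):

    #include <algorithm>

    struct Range { int start, end; };  // half-open [start, end)
    struct SpillPlan { Range kept, spilled, requeued; };

    // Keep [r.start, start), spill [start, cut), re-queue [cut, r.end);
    // cut is at least `until` so the spilled part stays in memory until the
    // current range begins, and at most min(end, r.end).
    SpillPlan PlanSpillBetweenUntil(Range r, int start, int until, int end) {
      int cut = std::min({std::max(start + 1, until), end, r.end});
      return {{r.start, start}, {start, cut}, {cut, r.end}};
    }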
-
-
-void RegisterAllocator::Spill(LiveRange* range) {
- DCHECK(!range->IsSpilled());
- TraceAlloc("Spilling live range %d\n", range->id());
- auto first = range->TopLevel();
- if (first->HasNoSpillType()) {
- if (FLAG_turbo_reuse_spill_slots) {
- AssignSpillRangeToLiveRange(first);
- } else {
- auto op = TryReuseSpillSlot(range);
- if (op == nullptr) {
- // Allocate a new operand referring to the spill slot.
- RegisterKind kind = range->Kind();
- int index = frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
- auto op_kind = kind == DOUBLE_REGISTERS
- ? InstructionOperand::DOUBLE_STACK_SLOT
- : InstructionOperand::STACK_SLOT;
- op = new (code_zone()) InstructionOperand(op_kind, index);
+ if (delayed_insertion_map.empty()) return;
+ // Insert all the moves which should occur after the stored move.
+ ZoneVector<MoveOperands*> to_insert(local_zone);
+ ZoneVector<MoveOperands*> to_eliminate(local_zone);
+ to_insert.reserve(4);
+ to_eliminate.reserve(4);
+ ParallelMove* moves = delayed_insertion_map.begin()->first.first;
+ for (auto it = delayed_insertion_map.begin();; ++it) {
+ bool done = it == delayed_insertion_map.end();
+ if (done || it->first.first != moves) {
+ // Commit the MoveOperands for the current ParallelMove.
+ for (MoveOperands* move : to_eliminate) {
+ move->Eliminate();
}
- first->SetSpillOperand(op);
+ for (MoveOperands* move : to_insert) {
+ moves->push_back(move);
+ }
+ if (done) break;
+ // Reset state.
+ to_eliminate.clear();
+ to_insert.clear();
+ moves = it->first.first;
}
- }
- range->MakeSpilled();
-}
-
-
-int RegisterAllocator::RegisterCount() const { return num_registers_; }
-
-
-#ifdef DEBUG
-
-
-void RegisterAllocator::Verify() const {
- for (auto current : live_ranges()) {
- if (current != nullptr) current->Verify();
+ // Gather all MoveOperands for a single ParallelMove.
+ MoveOperands* move =
+ new (code_zone()) MoveOperands(it->first.second, it->second);
+ MoveOperands* eliminate = moves->PrepareInsertAfter(move);
+ to_insert.push_back(move);
+ if (eliminate != nullptr) to_eliminate.push_back(eliminate);
}
}
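The loop above batches delayed moves per ParallelMove and flushes each batch when the owner changes, including the final one. The pattern generalizes to the following sketch (hypothetical helper; the real map keys on a (ParallelMove*, operand) pair):

    #include <map>
    #include <vector>

    // Walk a map sorted by owner, collecting items per owner and committing
    // each completed group exactly once.
    template <typename Owner, typename Item, typename Commit>
    void CommitGrouped(const std::multimap<Owner*, Item>& delayed,
                       Commit commit) {
      if (delayed.empty()) return;
      std::vector<Item> batch;
      Owner* owner = delayed.begin()->first;
      for (auto it = delayed.begin();; ++it) {
        bool done = it == delayed.end();
        if (done || it->first != owner) {
          commit(owner, batch);  // flush the completed group
          if (done) break;
          batch.clear();
          owner = it->first;
        }
        batch.push_back(it->second);
      }
    }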
-#endif
-
-
-void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
- int reg) {
- if (range->Kind() == DOUBLE_REGISTERS) {
- assigned_double_registers_->Add(reg);
- } else {
- DCHECK(range->Kind() == GENERAL_REGISTERS);
- assigned_registers_->Add(reg);
- }
- range->set_assigned_register(reg, code_zone());
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index b17837b..b96a43c 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -6,6 +6,8 @@
#define V8_REGISTER_ALLOCATOR_H_
#include "src/compiler/instruction.h"
+#include "src/ostreams.h"
+#include "src/register-configuration.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -13,64 +15,92 @@
namespace compiler {
enum RegisterKind {
- UNALLOCATED_REGISTERS,
GENERAL_REGISTERS,
DOUBLE_REGISTERS
};
// This class represents a single point of an InstructionOperand's lifetime. For
-// each instruction there are exactly two lifetime positions: the beginning and
-// the end of the instruction. Lifetime positions for different instructions are
-// disjoint.
-class LifetimePosition FINAL {
+// each instruction there are four lifetime positions:
+//
+// [[START, END], [START, END]]
+//
+// Where the first half position corresponds to
+//
+// [GapPosition::START, GapPosition::END]
+//
+// and the second half position corresponds to
+//
+// [Lifetime::USED_AT_START, Lifetime::USED_AT_END]
+//
+class LifetimePosition final {
public:
// Return the lifetime position that corresponds to the beginning of
- // the instruction with the given index.
- static LifetimePosition FromInstructionIndex(int index) {
+ // the gap with the given index.
+ static LifetimePosition GapFromInstructionIndex(int index) {
return LifetimePosition(index * kStep);
}
+ // Return the lifetime position that corresponds to the beginning of
+ // the instruction with the given index.
+ static LifetimePosition InstructionFromInstructionIndex(int index) {
+ return LifetimePosition(index * kStep + kHalfStep);
+ }
// Returns a numeric representation of this lifetime position.
- int Value() const { return value_; }
+ int value() const { return value_; }
// Returns the index of the instruction to which this lifetime position
// corresponds.
- int InstructionIndex() const {
+ int ToInstructionIndex() const {
DCHECK(IsValid());
return value_ / kStep;
}
- // Returns true if this lifetime position corresponds to the instruction
- // start.
- bool IsInstructionStart() const { return (value_ & (kStep - 1)) == 0; }
+ // Returns true if this lifetime position corresponds to a START value.
+ bool IsStart() const { return (value_ & (kHalfStep - 1)) == 0; }
+ // Returns true if this lifetime position corresponds to an END value.
+ bool IsEnd() const { return (value_ & (kHalfStep - 1)) == 1; }
+ // Returns true if this lifetime position corresponds to a gap START value.
+ bool IsFullStart() const { return (value_ & (kStep - 1)) == 0; }
- // Returns the lifetime position for the start of the instruction which
- // corresponds to this lifetime position.
- LifetimePosition InstructionStart() const {
+ bool IsGapPosition() const { return (value_ & 0x2) == 0; }
+ bool IsInstructionPosition() const { return !IsGapPosition(); }
+
+ // Returns the lifetime position for the current START.
+ LifetimePosition Start() const {
+ DCHECK(IsValid());
+ return LifetimePosition(value_ & ~(kHalfStep - 1));
+ }
+
+ // Returns the lifetime position for the current gap START.
+ LifetimePosition FullStart() const {
DCHECK(IsValid());
return LifetimePosition(value_ & ~(kStep - 1));
}
- // Returns the lifetime position for the end of the instruction which
- // corresponds to this lifetime position.
- LifetimePosition InstructionEnd() const {
+ // Returns the lifetime position for the current END.
+ LifetimePosition End() const {
DCHECK(IsValid());
- return LifetimePosition(InstructionStart().Value() + kStep / 2);
+ return LifetimePosition(Start().value_ + kHalfStep / 2);
}
- // Returns the lifetime position for the beginning of the next instruction.
- LifetimePosition NextInstruction() const {
+ // Returns the lifetime position for the beginning of the next START.
+ LifetimePosition NextStart() const {
DCHECK(IsValid());
- return LifetimePosition(InstructionStart().Value() + kStep);
+ return LifetimePosition(Start().value_ + kHalfStep);
}
- // Returns the lifetime position for the beginning of the previous
- // instruction.
- LifetimePosition PrevInstruction() const {
+ // Returns the lifetime position for the beginning of the next gap START.
+ LifetimePosition NextFullStart() const {
DCHECK(IsValid());
- DCHECK(value_ > 1);
- return LifetimePosition(InstructionStart().Value() - kStep);
+ return LifetimePosition(FullStart().value_ + kStep);
+ }
+
+ // Returns the lifetime position for the beginning of the previous START.
+ LifetimePosition PrevStart() const {
+ DCHECK(IsValid());
+ DCHECK(value_ >= kHalfStep);
+ return LifetimePosition(Start().value_ - kHalfStep);
}
// Constructs the lifetime position which does not correspond to any
@@ -81,6 +111,32 @@
// instruction.
bool IsValid() const { return value_ != -1; }
+ bool operator<(const LifetimePosition& that) const {
+ return this->value_ < that.value_;
+ }
+
+ bool operator<=(const LifetimePosition& that) const {
+ return this->value_ <= that.value_;
+ }
+
+ bool operator==(const LifetimePosition& that) const {
+ return this->value_ == that.value_;
+ }
+
+ bool operator!=(const LifetimePosition& that) const {
+ return this->value_ != that.value_;
+ }
+
+ bool operator>(const LifetimePosition& that) const {
+ return this->value_ > that.value_;
+ }
+
+ bool operator>=(const LifetimePosition& that) const {
+ return this->value_ >= that.value_;
+ }
+
+ void Print() const;
+
static inline LifetimePosition Invalid() { return LifetimePosition(); }
static inline LifetimePosition MaxPosition() {
@@ -89,11 +145,16 @@
return LifetimePosition(kMaxInt);
}
- private:
- static const int kStep = 2;
+ static inline LifetimePosition FromInt(int value) {
+ return LifetimePosition(value);
+ }
- // Code relies on kStep being a power of two.
- STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
+ private:
+ static const int kHalfStep = 2;
+ static const int kStep = 2 * kHalfStep;
+
+ // Code relies on kStep and kHalfStep being powers of two.
+ STATIC_ASSERT(IS_POWER_OF_TWO(kHalfStep));
explicit LifetimePosition(int value) : value_(value) {}
@@ -101,153 +162,227 @@
};
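A worked example of the new encoding, assuming kHalfStep == 2 and kStep == 4 as defined above: value = 4 * instruction index + offset, where offsets 0/1 are the gap START/END and offsets 2/3 are the instruction START/END.

    #include <cassert>

    int main() {
      const int kHalfStep = 2, kStep = 2 * kHalfStep;
      const int index = 5;
      int gap_start = index * kStep;                // GapFromInstructionIndex
      int instr_start = index * kStep + kHalfStep;  // InstructionFromInstructionIndex
      assert(gap_start == 20 && instr_start == 22);
      assert(gap_start / kStep == index);          // ToInstructionIndex()
      assert((gap_start & (kHalfStep - 1)) == 0);  // IsStart()
      assert((gap_start & 0x2) == 0);              // IsGapPosition()
      assert((instr_start & 0x2) != 0);            // IsInstructionPosition()
      return 0;
    }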
+std::ostream& operator<<(std::ostream& os, const LifetimePosition pos);
+
+
// Representation of the non-empty interval [start,end[.
-class UseInterval FINAL : public ZoneObject {
+class UseInterval final : public ZoneObject {
public:
UseInterval(LifetimePosition start, LifetimePosition end)
: start_(start), end_(end), next_(nullptr) {
- DCHECK(start.Value() < end.Value());
+ DCHECK(start < end);
}
LifetimePosition start() const { return start_; }
+ void set_start(LifetimePosition start) { start_ = start; }
LifetimePosition end() const { return end_; }
+ void set_end(LifetimePosition end) { end_ = end; }
UseInterval* next() const { return next_; }
+ void set_next(UseInterval* next) { next_ = next; }
// Split this interval at the given position without affecting the
// live range that owns it. The interval must contain the position.
- void SplitAt(LifetimePosition pos, Zone* zone);
+ UseInterval* SplitAt(LifetimePosition pos, Zone* zone);
// If this interval intersects with the other, return the smallest
// position that belongs to both of them.
LifetimePosition Intersect(const UseInterval* other) const {
- if (other->start().Value() < start_.Value()) return other->Intersect(this);
- if (other->start().Value() < end_.Value()) return other->start();
+ if (other->start() < start_) return other->Intersect(this);
+ if (other->start() < end_) return other->start();
return LifetimePosition::Invalid();
}
bool Contains(LifetimePosition point) const {
- return start_.Value() <= point.Value() && point.Value() < end_.Value();
+ return start_ <= point && point < end_;
}
- void set_start(LifetimePosition start) { start_ = start; }
- void set_next(UseInterval* next) { next_ = next; }
+ // Returns the index of the first gap covered by this interval.
+ int FirstGapIndex() const {
+ int ret = start_.ToInstructionIndex();
+ if (start_.IsInstructionPosition()) {
+ ++ret;
+ }
+ return ret;
+ }
+ // Returns the index of the last gap covered by this interval.
+ int LastGapIndex() const {
+ int ret = end_.ToInstructionIndex();
+ if (end_.IsGapPosition() && end_.IsStart()) {
+ --ret;
+ }
+ return ret;
+ }
+
+ private:
LifetimePosition start_;
LifetimePosition end_;
UseInterval* next_;
- private:
DISALLOW_COPY_AND_ASSIGN(UseInterval);
};
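Intersect normalizes by recursing so that the interval with the smaller start does the comparison; on plain ints, with -1 standing in for LifetimePosition::Invalid(), the same logic is:

    // Returns the first position common to [a_start, a_end) and
    // [b_start, b_end), or -1 if the intervals are disjoint.
    int Intersect(int a_start, int a_end, int b_start, int b_end) {
      if (b_start < a_start) return Intersect(b_start, b_end, a_start, a_end);
      if (b_start < a_end) return b_start;  // overlap starts at the later start
      return -1;
    }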
+enum class UsePositionType : uint8_t { kAny, kRequiresRegister, kRequiresSlot };
+
+
+enum class UsePositionHintType : uint8_t {
+ kNone,
+ kOperand,
+ kUsePos,
+ kPhi,
+ kUnresolved
+};
+
+
+static const int32_t kUnassignedRegister =
+ RegisterConfiguration::kMaxGeneralRegisters;
+
+
+static_assert(kUnassignedRegister <= RegisterConfiguration::kMaxDoubleRegisters,
+ "kUnassignedRegister too small");
+
+
// Representation of a use position.
-class UsePosition FINAL : public ZoneObject {
+class UsePosition final : public ZoneObject {
public:
- UsePosition(LifetimePosition pos, InstructionOperand* operand,
- InstructionOperand* hint);
+ UsePosition(LifetimePosition pos, InstructionOperand* operand, void* hint,
+ UsePositionHintType hint_type);
InstructionOperand* operand() const { return operand_; }
bool HasOperand() const { return operand_ != nullptr; }
- InstructionOperand* hint() const { return hint_; }
- bool HasHint() const;
- bool RequiresRegister() const;
- bool RegisterIsBeneficial() const;
+ bool RegisterIsBeneficial() const {
+ return RegisterBeneficialField::decode(flags_);
+ }
+ UsePositionType type() const { return TypeField::decode(flags_); }
+ void set_type(UsePositionType type, bool register_beneficial);
LifetimePosition pos() const { return pos_; }
- UsePosition* next() const { return next_; }
+ UsePosition* next() const { return next_; }
void set_next(UsePosition* next) { next_ = next; }
- InstructionOperand* const operand_;
- InstructionOperand* const hint_;
- LifetimePosition const pos_;
- UsePosition* next_;
- bool requires_reg_ : 1;
- bool register_beneficial_ : 1;
+ // For hinting only.
+ void set_assigned_register(int register_code) {
+ flags_ = AssignedRegisterField::update(flags_, register_code);
+ }
+
+ UsePositionHintType hint_type() const {
+ return HintTypeField::decode(flags_);
+ }
+ bool HasHint() const;
+ bool HintRegister(int* register_code) const;
+ void ResolveHint(UsePosition* use_pos);
+ bool IsResolved() const {
+ return hint_type() != UsePositionHintType::kUnresolved;
+ }
+ static UsePositionHintType HintTypeForOperand(const InstructionOperand& op);
private:
+ typedef BitField<UsePositionType, 0, 2> TypeField;
+ typedef BitField<UsePositionHintType, 2, 3> HintTypeField;
+ typedef BitField<bool, 5, 1> RegisterBeneficialField;
+ typedef BitField<int32_t, 6, 6> AssignedRegisterField;
+
+ InstructionOperand* const operand_;
+ void* hint_;
+ UsePosition* next_;
+ LifetimePosition const pos_;
+ uint32_t flags_;
+
DISALLOW_COPY_AND_ASSIGN(UsePosition);
};
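The BitField typedefs above pack four logically separate fields into the single 32-bit flags_ word. A hand-rolled equivalent with the same layout, shown only to illustrate the packing (V8 supplies its own BitField template):

    #include <cstdint>

    // Layout as in UsePosition::flags_: bits 0-1 type, bits 2-4 hint type,
    // bit 5 register-beneficial, bits 6-11 assigned register.
    template <int kShift, int kSize>
    struct Field {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(uint32_t value) { return value << kShift; }
      static uint32_t update(uint32_t word, uint32_t value) {
        return (word & ~kMask) | encode(value);
      }
      static uint32_t decode(uint32_t word) { return (word & kMask) >> kShift; }
    };

    typedef Field<0, 2> TypeBits;
    typedef Field<2, 3> HintTypeBits;
    typedef Field<5, 1> RegisterBeneficialBits;
    typedef Field<6, 6> AssignedRegisterBits;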
+
class SpillRange;
+class RegisterAllocationData;
+class TopLevelLiveRange;
+class LiveRangeGroup;
// Representation of SSA values' live ranges as a collection of (continuous)
// intervals over the instruction ordering.
-class LiveRange FINAL : public ZoneObject {
+class LiveRange : public ZoneObject {
public:
- static const int kInvalidAssignment = 0x7fffffff;
-
- LiveRange(int id, Zone* zone);
-
UseInterval* first_interval() const { return first_interval_; }
UsePosition* first_pos() const { return first_pos_; }
- LiveRange* parent() const { return parent_; }
- LiveRange* TopLevel() { return (parent_ == nullptr) ? this : parent_; }
- const LiveRange* TopLevel() const {
- return (parent_ == nullptr) ? this : parent_;
- }
+ TopLevelLiveRange* TopLevel() { return top_level_; }
+ const TopLevelLiveRange* TopLevel() const { return top_level_; }
+
+ bool IsTopLevel() const;
+
LiveRange* next() const { return next_; }
- bool IsChild() const { return parent() != nullptr; }
- int id() const { return id_; }
- bool IsFixed() const { return id_ < 0; }
+
+ int relative_id() const { return relative_id_; }
+
bool IsEmpty() const { return first_interval() == nullptr; }
- InstructionOperand* CreateAssignedOperand(Zone* zone) const;
- int assigned_register() const { return assigned_register_; }
- int spill_start_index() const { return spill_start_index_; }
- void set_assigned_register(int reg, Zone* zone);
- void MakeSpilled();
- bool is_phi() const { return is_phi_; }
- void set_is_phi(bool is_phi) { is_phi_ = is_phi; }
- bool is_non_loop_phi() const { return is_non_loop_phi_; }
- void set_is_non_loop_phi(bool is_non_loop_phi) {
- is_non_loop_phi_ = is_non_loop_phi;
+
+ InstructionOperand GetAssignedOperand() const;
+
+ MachineRepresentation representation() const {
+ return RepresentationField::decode(bits_);
}
+ int assigned_register() const { return AssignedRegisterField::decode(bits_); }
+ bool HasRegisterAssigned() const {
+ return assigned_register() != kUnassignedRegister;
+ }
+ void set_assigned_register(int reg);
+ void UnsetAssignedRegister();
+
+ bool spilled() const { return SpilledField::decode(bits_); }
+ void Spill();
+
+ RegisterKind kind() const;
+
// Returns use position in this live range that follows both start
// and last processed use position.
- // Modifies internal state of live range!
- UsePosition* NextUsePosition(LifetimePosition start);
+ UsePosition* NextUsePosition(LifetimePosition start) const;
// Returns use position for which register is required in this live
// range and which follows both start and last processed use position
- // Modifies internal state of live range!
- UsePosition* NextRegisterPosition(LifetimePosition start);
+ UsePosition* NextRegisterPosition(LifetimePosition start) const;
+
+ // Returns the first use position requiring a stack slot, or nullptr.
+ UsePosition* NextSlotPosition(LifetimePosition start) const;
// Returns use position for which register is beneficial in this live
// range and which follows both start and last processed use position
- // Modifies internal state of live range!
- UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
+ UsePosition* NextUsePositionRegisterIsBeneficial(
+ LifetimePosition start) const;
// Returns use position for which register is beneficial in this live
// range and which precedes start.
- UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start);
+ UsePosition* PreviousUsePositionRegisterIsBeneficial(
+ LifetimePosition start) const;
// Can this live range be spilled at this position.
- bool CanBeSpilled(LifetimePosition pos);
+ bool CanBeSpilled(LifetimePosition pos) const;
- // Split this live range at the given position which must follow the start of
- // the range.
+ // Splitting primitive used by both splitting and splintering members.
+ // Performs the split, but does not link the resulting ranges.
+ // The given position must follow the start of the range.
// All uses following the given position will be moved from this
// live range to the result live range.
- void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
+ // The current range will terminate at position, while result will start from
+ // position.
+ UsePosition* DetachAt(LifetimePosition position, LiveRange* result,
+ Zone* zone);
- RegisterKind Kind() const { return kind_; }
- bool HasRegisterAssigned() const {
- return assigned_register_ != kInvalidAssignment;
- }
- bool IsSpilled() const { return spilled_; }
+ // Detaches at position, and then links the resulting ranges. Returns the
+ // child, which starts at position.
+ LiveRange* SplitAt(LifetimePosition position, Zone* zone);
- InstructionOperand* current_hint_operand() const {
- DCHECK(current_hint_operand_ == FirstHint());
- return current_hint_operand_;
+ // Returns nullptr when no register is hinted, otherwise sets register_index.
+ UsePosition* FirstHintPosition(int* register_index) const;
+ UsePosition* FirstHintPosition() const {
+ int register_index;
+ return FirstHintPosition(&register_index);
}
- InstructionOperand* FirstHint() const {
- UsePosition* pos = first_pos_;
- while (pos != nullptr && !pos->HasHint()) pos = pos->next();
- if (pos != nullptr) return pos->hint();
- return nullptr;
+
+ UsePosition* current_hint_position() const {
+ DCHECK(current_hint_position_ == FirstHintPosition());
+ return current_hint_position_;
}
LifetimePosition Start() const {
@@ -260,135 +395,464 @@
return last_interval_->end();
}
- enum class SpillType { kNoSpillType, kSpillOperand, kSpillRange };
- SpillType spill_type() const { return spill_type_; }
- InstructionOperand* GetSpillOperand() const {
- return spill_type_ == SpillType::kSpillOperand ? spill_operand_ : nullptr;
- }
- SpillRange* GetSpillRange() const {
- return spill_type_ == SpillType::kSpillRange ? spill_range_ : nullptr;
- }
- bool HasNoSpillType() const { return spill_type_ == SpillType::kNoSpillType; }
- bool HasSpillOperand() const {
- return spill_type_ == SpillType::kSpillOperand;
- }
- bool HasSpillRange() const { return spill_type_ == SpillType::kSpillRange; }
-
- void SpillAtDefinition(Zone* zone, int gap_index,
- InstructionOperand* operand);
- void SetSpillOperand(InstructionOperand* operand);
- void SetSpillRange(SpillRange* spill_range);
- void CommitSpillOperand(InstructionOperand* operand);
- void CommitSpillsAtDefinition(InstructionSequence* sequence,
- InstructionOperand* operand);
-
- void SetSpillStartIndex(int start) {
- spill_start_index_ = Min(start, spill_start_index_);
- }
-
bool ShouldBeAllocatedBefore(const LiveRange* other) const;
bool CanCover(LifetimePosition position) const;
- bool Covers(LifetimePosition position);
- LifetimePosition FirstIntersection(LiveRange* other);
+ bool Covers(LifetimePosition position) const;
+ LifetimePosition FirstIntersection(LiveRange* other) const;
- // Add a new interval or a new use position to this live range.
- void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
- void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
- void AddUsePosition(LifetimePosition pos, InstructionOperand* operand,
- InstructionOperand* hint, Zone* zone);
+ void VerifyChildStructure() const {
+ VerifyIntervals();
+ VerifyPositions();
+ }
- // Shorten the most recently added interval by setting a new start.
- void ShortenTo(LifetimePosition start);
+ void ConvertUsesToOperand(const InstructionOperand& op,
+ const InstructionOperand& spill_op);
+ void SetUseHints(int register_index);
+ void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
-#ifdef DEBUG
- // True if target overlaps an existing interval.
- bool HasOverlap(UseInterval* target) const;
- void Verify() const;
-#endif
+ // Used solely by the Greedy Allocator:
+ unsigned GetSize();
+ float weight() const { return weight_; }
+ void set_weight(float weight) { weight_ = weight; }
+ LiveRangeGroup* group() const { return group_; }
+ void set_group(LiveRangeGroup* group) { group_ = group; }
+ void Print(const RegisterConfiguration* config, bool with_children) const;
+ void Print(bool with_children) const;
+
+ static const int kInvalidSize = -1;
+ static const float kInvalidWeight;
+ static const float kMaxWeight;
private:
- struct SpillAtDefinitionList;
+ friend class TopLevelLiveRange;
+ explicit LiveRange(int relative_id, MachineRepresentation rep,
+ TopLevelLiveRange* top_level);
- void ConvertUsesToOperand(InstructionOperand* op);
+ void UpdateParentForAllChildren(TopLevelLiveRange* new_top_level);
+
+ void set_spilled(bool value) { bits_ = SpilledField::update(bits_, value); }
+
UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
void AdvanceLastProcessedMarker(UseInterval* to_start_of,
LifetimePosition but_not_past) const;
- // TODO(dcarney): pack this structure better.
- int id_;
- bool spilled_;
- bool is_phi_;
- bool is_non_loop_phi_;
- RegisterKind kind_;
- int assigned_register_;
+ void VerifyPositions() const;
+ void VerifyIntervals() const;
+
+ typedef BitField<bool, 0, 1> SpilledField;
+ typedef BitField<int32_t, 6, 6> AssignedRegisterField;
+ typedef BitField<MachineRepresentation, 12, 8> RepresentationField;
+
+ // Unique among children and splinters of the same virtual register.
+ int relative_id_;
+ uint32_t bits_;
UseInterval* last_interval_;
UseInterval* first_interval_;
UsePosition* first_pos_;
- LiveRange* parent_;
+ TopLevelLiveRange* top_level_;
LiveRange* next_;
// This is used as a cache, it doesn't affect correctness.
mutable UseInterval* current_interval_;
- UsePosition* last_processed_use_;
+ // This is used as a cache, it doesn't affect correctness.
+ mutable UsePosition* last_processed_use_;
// This is used as a cache, it's invalid outside of BuildLiveRanges.
- InstructionOperand* current_hint_operand_;
- int spill_start_index_;
- SpillType spill_type_;
- union {
- InstructionOperand* spill_operand_;
- SpillRange* spill_range_;
- };
- SpillAtDefinitionList* spills_at_definition_;
+ mutable UsePosition* current_hint_position_;
+ // Cache the last position splintering stopped at.
+ mutable UsePosition* splitting_pointer_;
+ // greedy: the number of LifetimePositions covered by this range. Used to
+ // prioritize selecting live ranges for register assignment, as well as
+ // in weight calculations.
+ int size_;
- friend class RegisterAllocator; // Assigns to kind_.
+ // greedy: a metric for resolving conflicts between ranges with an assigned
+ // register and ranges that intersect them and need a register.
+ float weight_;
+
+ // greedy: grouping
+ LiveRangeGroup* group_;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
-class SpillRange FINAL : public ZoneObject {
+class LiveRangeGroup final : public ZoneObject {
public:
- SpillRange(LiveRange* range, Zone* zone);
+ explicit LiveRangeGroup(Zone* zone) : ranges_(zone) {}
+ ZoneVector<LiveRange*>& ranges() { return ranges_; }
+ const ZoneVector<LiveRange*>& ranges() const { return ranges_; }
+
+ // TODO(mtrofin): populate assigned register and use in weight calculation.
+ int assigned_register() const { return assigned_register_; }
+ void set_assigned_register(int reg) { assigned_register_ = reg; }
+
+ private:
+ ZoneVector<LiveRange*> ranges_;
+ int assigned_register_;
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeGroup);
+};
+
+
+class TopLevelLiveRange final : public LiveRange {
+ public:
+ explicit TopLevelLiveRange(int vreg, MachineRepresentation rep);
+ int spill_start_index() const { return spill_start_index_; }
+
+ bool IsFixed() const { return vreg_ < 0; }
+
+ bool is_phi() const { return IsPhiField::decode(bits_); }
+ void set_is_phi(bool value) { bits_ = IsPhiField::update(bits_, value); }
+
+ bool is_non_loop_phi() const { return IsNonLoopPhiField::decode(bits_); }
+ void set_is_non_loop_phi(bool value) {
+ bits_ = IsNonLoopPhiField::update(bits_, value);
+ }
+
+ bool has_slot_use() const { return HasSlotUseField::decode(bits_); }
+ void set_has_slot_use(bool value) {
+ bits_ = HasSlotUseField::update(bits_, value);
+ }
+
+ // Add a new interval or a new use position to this live range.
+ void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
+ void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
+ void AddUsePosition(UsePosition* pos);
+
+ // Shorten the most recently added interval by setting a new start.
+ void ShortenTo(LifetimePosition start);
+
+ // Detaches the region between start and end and attributes it to result.
+ // The current range is recorded as result's "splintered_from". No
+ // parent/child relationship is established between this and result.
+ void Splinter(LifetimePosition start, LifetimePosition end, Zone* zone);
+
+ // Assuming other was splintered from this range, embeds other and its
+ // children as part of the children sequence of this range.
+ void Merge(TopLevelLiveRange* other, Zone* zone);
+
+ // Spill range management.
+ void SetSpillRange(SpillRange* spill_range);
+ enum class SpillType { kNoSpillType, kSpillOperand, kSpillRange };
+ void set_spill_type(SpillType value) {
+ bits_ = SpillTypeField::update(bits_, value);
+ }
+ SpillType spill_type() const { return SpillTypeField::decode(bits_); }
+ InstructionOperand* GetSpillOperand() const {
+ DCHECK(spill_type() == SpillType::kSpillOperand);
+ return spill_operand_;
+ }
+
+ SpillRange* GetAllocatedSpillRange() const {
+ DCHECK(spill_type() != SpillType::kSpillOperand);
+ return spill_range_;
+ }
+
+ SpillRange* GetSpillRange() const {
+ DCHECK(spill_type() == SpillType::kSpillRange);
+ return spill_range_;
+ }
+ bool HasNoSpillType() const {
+ return spill_type() == SpillType::kNoSpillType;
+ }
+ bool HasSpillOperand() const {
+ return spill_type() == SpillType::kSpillOperand;
+ }
+ bool HasSpillRange() const { return spill_type() == SpillType::kSpillRange; }
+
+ AllocatedOperand GetSpillRangeOperand() const;
+
+ void RecordSpillLocation(Zone* zone, int gap_index,
+ InstructionOperand* operand);
+ void SetSpillOperand(InstructionOperand* operand);
+ void SetSpillStartIndex(int start) {
+ spill_start_index_ = Min(start, spill_start_index_);
+ }
+
+ void CommitSpillMoves(InstructionSequence* sequence,
+ const InstructionOperand& operand,
+ bool might_be_duplicated);
+
+ // If all the children of this range are spilled in deferred blocks, and if
+ // every non-spilled child with a use position requiring a slot is itself
+ // contained in a deferred block, mark the range as
+ // IsSpilledOnlyInDeferredBlocks, so that we avoid spilling at definition
+ // and instead let the LiveRangeConnector perform the spills within the
+ // deferred blocks. In that case, spills for non-spilled ranges with slot
+ // use positions are inserted here.
+ void MarkSpilledInDeferredBlock() {
+ spill_start_index_ = -1;
+ spilled_in_deferred_blocks_ = true;
+ spill_move_insertion_locations_ = nullptr;
+ }
+
+ bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
+ const InstructionOperand& spill_operand);
+
+ TopLevelLiveRange* splintered_from() const { return splintered_from_; }
+ bool IsSplinter() const { return splintered_from_ != nullptr; }
+ bool MayRequireSpillRange() const {
+ DCHECK(!IsSplinter());
+ return !HasSpillOperand() && spill_range_ == nullptr;
+ }
+ void UpdateSpillRangePostMerge(TopLevelLiveRange* merged);
+ int vreg() const { return vreg_; }
+
+#if DEBUG
+ int debug_virt_reg() const;
+#endif
+
+ void Verify() const;
+ void VerifyChildrenInOrder() const;
+
+ int GetNextChildId() {
+ return IsSplinter() ? splintered_from()->GetNextChildId()
+ : ++last_child_id_;
+ }
+
+ int GetChildCount() const { return last_child_id_ + 1; }
+
+ bool IsSpilledOnlyInDeferredBlocks() const {
+ return spilled_in_deferred_blocks_;
+ }
+
+ struct SpillMoveInsertionList;
+
+ SpillMoveInsertionList* spill_move_insertion_locations() const {
+ return spill_move_insertion_locations_;
+ }
+ TopLevelLiveRange* splinter() const { return splinter_; }
+ void SetSplinter(TopLevelLiveRange* splinter) {
+ DCHECK_NULL(splinter_);
+ DCHECK_NOT_NULL(splinter);
+
+ splinter_ = splinter;
+ splinter->relative_id_ = GetNextChildId();
+ splinter->set_spill_type(spill_type());
+ splinter->SetSplinteredFrom(this);
+ }
+
+ void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
+ bool has_preassigned_slot() const { return has_preassigned_slot_; }
+
+ private:
+ void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
+
+ typedef BitField<bool, 1, 1> HasSlotUseField;
+ typedef BitField<bool, 2, 1> IsPhiField;
+ typedef BitField<bool, 3, 1> IsNonLoopPhiField;
+ typedef BitField<SpillType, 4, 2> SpillTypeField;
+
+ int vreg_;
+ int last_child_id_;
+ TopLevelLiveRange* splintered_from_;
+ union {
+ // Correct value determined by spill_type()
+ InstructionOperand* spill_operand_;
+ SpillRange* spill_range_;
+ };
+ SpillMoveInsertionList* spill_move_insertion_locations_;
+ // TODO(mtrofin): generalize spilling after definition, currently specialized
+ // just for spill in a single deferred block.
+ bool spilled_in_deferred_blocks_;
+ int spill_start_index_;
+ UsePosition* last_pos_;
+ TopLevelLiveRange* splinter_;
+ bool has_preassigned_slot_;
+
+ DISALLOW_COPY_AND_ASSIGN(TopLevelLiveRange);
+};
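Splinter and Merge are easiest to picture on plain interval lists: Splinter carves [start, end) out of a range's intervals, and Merge later threads the carved pieces back in as children. A toy model on half-open integer intervals (hypothetical types, not the V8 classes):

    #include <algorithm>
    #include <vector>

    struct Piece { int start, end; };  // half-open [start, end)

    // Remove the part of every interval that falls inside [start, end),
    // returning the carved-out pieces; `pieces` keeps the remainder.
    std::vector<Piece> Splinter(std::vector<Piece>* pieces, int start, int end) {
      std::vector<Piece> carved, kept;
      for (const Piece& p : *pieces) {
        if (p.end <= start || end <= p.start) {  // no overlap
          kept.push_back(p);
          continue;
        }
        if (p.start < start) kept.push_back({p.start, start});
        carved.push_back({std::max(p.start, start), std::min(p.end, end)});
        if (end < p.end) kept.push_back({end, p.end});
      }
      *pieces = kept;
      return carved;
    }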
+
+
+struct PrintableLiveRange {
+ const RegisterConfiguration* register_configuration_;
+ const LiveRange* range_;
+};
+
+
+std::ostream& operator<<(std::ostream& os,
+ const PrintableLiveRange& printable_range);
+
+
+class SpillRange final : public ZoneObject {
+ public:
+ static const int kUnassignedSlot = -1;
+ SpillRange(TopLevelLiveRange* range, Zone* zone);
UseInterval* interval() const { return use_interval_; }
- RegisterKind Kind() const { return live_ranges_[0]->Kind(); }
+ // Currently, only 4- or 8-byte slots are supported.
+ int ByteWidth() const;
bool IsEmpty() const { return live_ranges_.empty(); }
bool TryMerge(SpillRange* other);
- void SetOperand(InstructionOperand* op);
+ bool HasSlot() const { return assigned_slot_ != kUnassignedSlot; }
+
+ void set_assigned_slot(int index) {
+ DCHECK_EQ(kUnassignedSlot, assigned_slot_);
+ assigned_slot_ = index;
+ }
+ int assigned_slot() {
+ DCHECK_NE(kUnassignedSlot, assigned_slot_);
+ return assigned_slot_;
+ }
+ const ZoneVector<TopLevelLiveRange*>& live_ranges() const {
+ return live_ranges_;
+ }
+ ZoneVector<TopLevelLiveRange*>& live_ranges() { return live_ranges_; }
+ int byte_width() const { return byte_width_; }
+ RegisterKind kind() const { return kind_; }
+ void Print() const;
private:
LifetimePosition End() const { return end_position_; }
- ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
bool IsIntersectingWith(SpillRange* other) const;
// Merge intervals, making sure the use intervals are sorted
void MergeDisjointIntervals(UseInterval* other);
- ZoneVector<LiveRange*> live_ranges_;
+ ZoneVector<TopLevelLiveRange*> live_ranges_;
UseInterval* use_interval_;
LifetimePosition end_position_;
+ int assigned_slot_;
+ int byte_width_;
+ RegisterKind kind_;
DISALLOW_COPY_AND_ASSIGN(SpillRange);
};
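TryMerge can only succeed when the two spill ranges are never live at the same time, i.e. their sorted use intervals are pairwise disjoint, so the merged ranges can share one stack slot. A linear sweep over two sorted lists checks this (toy types):

    #include <cstddef>
    #include <vector>

    struct Iv { int start, end; };  // half-open, sorted by start

    // True if no interval of `a` overlaps any interval of `b`.
    bool Disjoint(const std::vector<Iv>& a, const std::vector<Iv>& b) {
      size_t i = 0, j = 0;
      while (i < a.size() && j < b.size()) {
        if (a[i].end <= b[j].start) {
          ++i;
        } else if (b[j].end <= a[i].start) {
          ++j;
        } else {
          return false;  // overlap found
        }
      }
      return true;
    }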
-class RegisterAllocator FINAL : public ZoneObject {
+class RegisterAllocationData final : public ZoneObject {
public:
- explicit RegisterAllocator(const RegisterConfiguration* config,
- Zone* local_zone, Frame* frame,
- InstructionSequence* code,
- const char* debug_name = nullptr);
+ class PhiMapValue : public ZoneObject {
+ public:
+ PhiMapValue(PhiInstruction* phi, const InstructionBlock* block, Zone* zone);
- bool AllocationOk() { return allocation_ok_; }
+ const PhiInstruction* phi() const { return phi_; }
+ const InstructionBlock* block() const { return block_; }
- const ZoneVector<LiveRange*>& live_ranges() const { return live_ranges_; }
- const ZoneVector<LiveRange*>& fixed_live_ranges() const {
+ // For hinting.
+ int assigned_register() const { return assigned_register_; }
+ void set_assigned_register(int register_code) {
+ DCHECK_EQ(assigned_register_, kUnassignedRegister);
+ assigned_register_ = register_code;
+ }
+ void UnsetAssignedRegister() { assigned_register_ = kUnassignedRegister; }
+
+ void AddOperand(InstructionOperand* operand);
+ void CommitAssignment(const InstructionOperand& operand);
+
+ private:
+ PhiInstruction* const phi_;
+ const InstructionBlock* const block_;
+ ZoneVector<InstructionOperand*> incoming_operands_;
+ int assigned_register_;
+ };
+ typedef ZoneMap<int, PhiMapValue*> PhiMap;
+
+ struct DelayedReference {
+ ReferenceMap* map;
+ InstructionOperand* operand;
+ };
+ typedef ZoneVector<DelayedReference> DelayedReferences;
+ typedef ZoneVector<std::pair<TopLevelLiveRange*, int>>
+ RangesWithPreassignedSlots;
+
+ RegisterAllocationData(const RegisterConfiguration* config,
+ Zone* allocation_zone, Frame* frame,
+ InstructionSequence* code,
+ const char* debug_name = nullptr);
+
+ const ZoneVector<TopLevelLiveRange*>& live_ranges() const {
+ return live_ranges_;
+ }
+ ZoneVector<TopLevelLiveRange*>& live_ranges() { return live_ranges_; }
+ const ZoneVector<TopLevelLiveRange*>& fixed_live_ranges() const {
return fixed_live_ranges_;
}
- const ZoneVector<LiveRange*>& fixed_double_live_ranges() const {
+ ZoneVector<TopLevelLiveRange*>& fixed_live_ranges() {
+ return fixed_live_ranges_;
+ }
+ ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() {
return fixed_double_live_ranges_;
}
+ const ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() const {
+ return fixed_double_live_ranges_;
+ }
+ ZoneVector<BitVector*>& live_in_sets() { return live_in_sets_; }
+ ZoneVector<BitVector*>& live_out_sets() { return live_out_sets_; }
+ ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
+ DelayedReferences& delayed_references() { return delayed_references_; }
InstructionSequence* code() const { return code_; }
- // This zone is for data structures only needed during register allocation.
- Zone* local_zone() const { return local_zone_; }
+ // This zone is for data structures only needed during register allocation
+ // phases.
+ Zone* allocation_zone() const { return allocation_zone_; }
+ // This zone is for InstructionOperands and moves that live beyond register
+ // allocation.
+ Zone* code_zone() const { return code()->zone(); }
+ Frame* frame() const { return frame_; }
+ const char* debug_name() const { return debug_name_; }
+ const RegisterConfiguration* config() const { return config_; }
+
+ MachineRepresentation RepresentationFor(int virtual_register);
+
+ TopLevelLiveRange* GetOrCreateLiveRangeFor(int index);
+ // Creates a new live range.
+ TopLevelLiveRange* NewLiveRange(int index, MachineRepresentation rep);
+ TopLevelLiveRange* NextLiveRange(MachineRepresentation rep);
+
+ SpillRange* AssignSpillRangeToLiveRange(TopLevelLiveRange* range);
+ SpillRange* CreateSpillRangeForLiveRange(TopLevelLiveRange* range);
+
+ MoveOperands* AddGapMove(int index, Instruction::GapPosition position,
+ const InstructionOperand& from,
+ const InstructionOperand& to);
+
+ bool IsReference(TopLevelLiveRange* top_range) const {
+ return code()->IsReference(top_range->vreg());
+ }
+
+ bool ExistsUseWithoutDefinition();
+ bool RangesDefinedInDeferredStayInDeferred();
+
+ void MarkAllocated(RegisterKind kind, int index);
+
+ PhiMapValue* InitializePhiMap(const InstructionBlock* block,
+ PhiInstruction* phi);
+ PhiMapValue* GetPhiMapValueFor(TopLevelLiveRange* top_range);
+ PhiMapValue* GetPhiMapValueFor(int virtual_register);
+ bool IsBlockBoundary(LifetimePosition pos) const;
+
+ RangesWithPreassignedSlots& preassigned_slot_ranges() {
+ return preassigned_slot_ranges_;
+ }
+
+ private:
+ int GetNextLiveRangeId();
+
+ Zone* const allocation_zone_;
+ Frame* const frame_;
+ InstructionSequence* const code_;
+ const char* const debug_name_;
+ const RegisterConfiguration* const config_;
+ PhiMap phi_map_;
+ ZoneVector<int> allocatable_codes_;
+ ZoneVector<int> allocatable_double_codes_;
+ ZoneVector<BitVector*> live_in_sets_;
+ ZoneVector<BitVector*> live_out_sets_;
+ ZoneVector<TopLevelLiveRange*> live_ranges_;
+ ZoneVector<TopLevelLiveRange*> fixed_live_ranges_;
+ ZoneVector<TopLevelLiveRange*> fixed_double_live_ranges_;
+ ZoneVector<SpillRange*> spill_ranges_;
+ DelayedReferences delayed_references_;
+ BitVector* assigned_registers_;
+ BitVector* assigned_double_registers_;
+ int virtual_register_count_;
+ RangesWithPreassignedSlots preassigned_slot_ranges_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
+};
+
+
+class ConstraintBuilder final : public ZoneObject {
+ public:
+ explicit ConstraintBuilder(RegisterAllocationData* data);
// Phase 1 : insert moves to account for fixed register operands.
void MeetRegisterConstraints();
@@ -397,85 +861,174 @@
// of blocks containing phis.
void ResolvePhis();
- // Phase 3: compute liveness of all virtual registers.
- void BuildLiveRanges();
- bool ExistsUseWithoutDefinition();
-
- // Phase 4: compute register assignments.
- void AllocateGeneralRegisters();
- void AllocateDoubleRegisters();
-
- // Phase 5: reassign spill slots for maximal reuse.
- void ReuseSpillSlots();
-
- // Phase 6: commit assignment.
- void CommitAssignment();
-
- // Phase 7: compute values for pointer maps.
- void PopulatePointerMaps(); // TODO(titzer): rename to PopulateReferenceMaps.
-
- // Phase 8: reconnect split ranges with moves.
- void ConnectRanges();
-
- // Phase 9: insert moves to connect ranges across basic blocks.
- void ResolveControlFlow();
-
private:
- int GetVirtualRegister() {
- int vreg = code()->NextVirtualRegister();
- if (vreg >= UnallocatedOperand::kMaxVirtualRegisters) {
- allocation_ok_ = false;
- // Maintain the invariant that we return something below the maximum.
- return 0;
- }
- return vreg;
- }
+ RegisterAllocationData* data() const { return data_; }
+ InstructionSequence* code() const { return data()->code(); }
+ Zone* allocation_zone() const { return data()->allocation_zone(); }
- // Checks whether the value of a given virtual register is a reference.
- // TODO(titzer): rename this to IsReference.
- bool HasTaggedValue(int virtual_register) const;
-
- // Returns the register kind required by the given virtual register.
- RegisterKind RequiredRegisterKind(int virtual_register) const;
-
- // This zone is for InstructionOperands and moves that live beyond register
- // allocation.
- Zone* code_zone() const { return code()->zone(); }
-
- BitVector* assigned_registers() { return assigned_registers_; }
- BitVector* assigned_double_registers() { return assigned_double_registers_; }
-
-#ifdef DEBUG
- void Verify() const;
-#endif
-
- void AllocateRegisters();
- bool CanEagerlyResolveControlFlow(const InstructionBlock* block) const;
- bool SafePointsAreInOrder() const;
-
- // Liveness analysis support.
- BitVector* ComputeLiveOut(const InstructionBlock* block);
- void AddInitialIntervals(const InstructionBlock* block, BitVector* live_out);
- bool IsOutputRegisterOf(Instruction* instr, int index);
- bool IsOutputDoubleRegisterOf(Instruction* instr, int index);
- void ProcessInstructions(const InstructionBlock* block, BitVector* live);
+ InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos,
+ bool is_tagged);
void MeetRegisterConstraints(const InstructionBlock* block);
- void MeetConstraintsBetween(Instruction* first, Instruction* second,
- int gap_index);
+ void MeetConstraintsBefore(int index);
+ void MeetConstraintsAfter(int index);
void MeetRegisterConstraintsForLastInstructionInBlock(
const InstructionBlock* block);
void ResolvePhis(const InstructionBlock* block);
+ RegisterAllocationData* const data_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConstraintBuilder);
+};
+
+
+class LiveRangeBuilder final : public ZoneObject {
+ public:
+ explicit LiveRangeBuilder(RegisterAllocationData* data, Zone* local_zone);
+
+ // Phase 3: compute liveness of all virtual registers.
+ void BuildLiveRanges();
+ static BitVector* ComputeLiveOut(const InstructionBlock* block,
+ RegisterAllocationData* data);
+
+ private:
+ RegisterAllocationData* data() const { return data_; }
+ InstructionSequence* code() const { return data()->code(); }
+ Zone* allocation_zone() const { return data()->allocation_zone(); }
+ Zone* code_zone() const { return code()->zone(); }
+ const RegisterConfiguration* config() const { return data()->config(); }
+ ZoneVector<BitVector*>& live_in_sets() const {
+ return data()->live_in_sets();
+ }
+
+ void Verify() const;
+
+ // Liveness analysis support.
+ void AddInitialIntervals(const InstructionBlock* block, BitVector* live_out);
+ void ProcessInstructions(const InstructionBlock* block, BitVector* live);
+ void ProcessPhis(const InstructionBlock* block, BitVector* live);
+ void ProcessLoopHeader(const InstructionBlock* block, BitVector* live);
+
+ static int FixedLiveRangeID(int index) { return -index - 1; }
+ int FixedDoubleLiveRangeID(int index);
+ TopLevelLiveRange* FixedLiveRangeFor(int index);
+ TopLevelLiveRange* FixedDoubleLiveRangeFor(int index);
+
+ void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
+ void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
+
+ UsePosition* NewUsePosition(LifetimePosition pos, InstructionOperand* operand,
+ void* hint, UsePositionHintType hint_type);
+ UsePosition* NewUsePosition(LifetimePosition pos) {
+ return NewUsePosition(pos, nullptr, nullptr, UsePositionHintType::kNone);
+ }
+ TopLevelLiveRange* LiveRangeFor(InstructionOperand* operand);
// Helper methods for building intervals.
- InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos,
- bool is_tagged);
- LiveRange* LiveRangeFor(InstructionOperand* operand);
- void Define(LifetimePosition position, InstructionOperand* operand,
- InstructionOperand* hint);
+ UsePosition* Define(LifetimePosition position, InstructionOperand* operand,
+ void* hint, UsePositionHintType hint_type);
+ void Define(LifetimePosition position, InstructionOperand* operand) {
+ Define(position, operand, nullptr, UsePositionHintType::kNone);
+ }
+ UsePosition* Use(LifetimePosition block_start, LifetimePosition position,
+ InstructionOperand* operand, void* hint,
+ UsePositionHintType hint_type);
void Use(LifetimePosition block_start, LifetimePosition position,
- InstructionOperand* operand, InstructionOperand* hint);
- void AddGapMove(int index, GapInstruction::InnerPosition position,
- InstructionOperand* from, InstructionOperand* to);
+ InstructionOperand* operand) {
+ Use(block_start, position, operand, nullptr, UsePositionHintType::kNone);
+ }
+
+ RegisterAllocationData* const data_;
+ ZoneMap<InstructionOperand*, UsePosition*> phi_hints_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeBuilder);
+};
+
+
+class RegisterAllocator : public ZoneObject {
+ public:
+ explicit RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
+
+ protected:
+ RegisterAllocationData* data() const { return data_; }
+ InstructionSequence* code() const { return data()->code(); }
+ RegisterKind mode() const { return mode_; }
+ int num_registers() const { return num_registers_; }
+ int num_allocatable_registers() const { return num_allocatable_registers_; }
+ int allocatable_register_code(int allocatable_index) const {
+ return allocatable_register_codes_[allocatable_index];
+ }
+
+ // TODO(mtrofin): explain why splitting in gap START is always OK.
+ LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
+ int instruction_index);
+
+ Zone* allocation_zone() const { return data()->allocation_zone(); }
+
+ // Find the optimal split for ranges defined by a memory operand, e.g.
+ // constants or function parameters passed on the stack.
+ void SplitAndSpillRangesDefinedByMemoryOperand(bool operands_only);
+
+ // Split the given range at the given position.
+ // If range starts at or after the given position then the
+ // original range is returned.
+ // Otherwise returns the live range that starts at pos and contains
+ // all uses from the original range that follow pos. Uses at pos will
+ // still be owned by the original range after splitting.
+ LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
+
+ bool CanProcessRange(LiveRange* range) const {
+ return range != nullptr && !range->IsEmpty() && range->kind() == mode();
+ }
+
+ // Split the given range in a position from the interval [start, end].
+ LiveRange* SplitBetween(LiveRange* range, LifetimePosition start,
+ LifetimePosition end);
+
+ // Find a lifetime position in the interval [start, end] which
+ // is optimal for splitting: it is either header of the outermost
+ // loop covered by this interval or the latest possible position.
+ LifetimePosition FindOptimalSplitPos(LifetimePosition start,
+ LifetimePosition end);
+
+ void Spill(LiveRange* range);
+
+ // If we are trying to spill a range inside the loop try to
+ // hoist spill position out to the point just before the loop.
+ LifetimePosition FindOptimalSpillingPos(LiveRange* range,
+ LifetimePosition pos);
+
+ const ZoneVector<TopLevelLiveRange*>& GetFixedRegisters() const;
+ const char* RegisterName(int allocation_index) const;
+
+ private:
+ RegisterAllocationData* const data_;
+ const RegisterKind mode_;
+ const int num_registers_;
+ int num_allocatable_registers_;
+ const int* allocatable_register_codes_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
+};
+
+
+class LinearScanAllocator final : public RegisterAllocator {
+ public:
+ LinearScanAllocator(RegisterAllocationData* data, RegisterKind kind,
+ Zone* local_zone);
+
+ // Phase 4: compute register assignments.
+ void AllocateRegisters();
+
+ private:
+ ZoneVector<LiveRange*>& unhandled_live_ranges() {
+ return unhandled_live_ranges_;
+ }
+ ZoneVector<LiveRange*>& active_live_ranges() { return active_live_ranges_; }
+ ZoneVector<LiveRange*>& inactive_live_ranges() {
+ return inactive_live_ranges_;
+ }
+
+ void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
// Helper methods for updating the life range lists.
void AddToActive(LiveRange* range);
@@ -490,32 +1043,9 @@
void InactiveToActive(LiveRange* range);
// Helper methods for allocating registers.
- bool TryReuseSpillForPhi(LiveRange* range);
+ bool TryReuseSpillForPhi(TopLevelLiveRange* range);
bool TryAllocateFreeReg(LiveRange* range);
void AllocateBlockedReg(LiveRange* range);
- SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
- void FreeSpillSlot(LiveRange* range);
- InstructionOperand* TryReuseSpillSlot(LiveRange* range);
-
- // Live range splitting helpers.
-
- // Split the given range at the given position.
- // If range starts at or after the given position then the
- // original range is returned.
- // Otherwise returns the live range that starts at pos and contains
- // all uses from the original range that follow pos. Uses at pos will
- // still be owned by the original range after splitting.
- LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
-
- // Split the given range in a position from the interval [start, end].
- LiveRange* SplitBetween(LiveRange* range, LifetimePosition start,
- LifetimePosition end);
-
- // Find a lifetime position in the interval [start, end] which
- // is optimal for splitting: it is either header of the outermost
- // loop covered by this interval or the latest possible position.
- LifetimePosition FindOptimalSplitPos(LifetimePosition start,
- LifetimePosition end);
// Spill the given life range after position pos.
void SpillAfter(LiveRange* range, LifetimePosition pos);
@@ -531,108 +1061,105 @@
void SplitAndSpillIntersecting(LiveRange* range);
- // If we are trying to spill a range inside the loop try to
- // hoist spill position out to the point just before the loop.
- LifetimePosition FindOptimalSpillingPos(LiveRange* range,
- LifetimePosition pos);
-
- void Spill(LiveRange* range);
- bool IsBlockBoundary(LifetimePosition pos);
-
- // Helper methods for resolving control flow.
- void ResolveControlFlow(const InstructionBlock* block,
- InstructionOperand* cur_op,
- const InstructionBlock* pred,
- InstructionOperand* pred_op);
-
- void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
-
- // Return parallel move that should be used to connect ranges split at the
- // given position.
- ParallelMove* GetConnectingParallelMove(LifetimePosition pos);
-
- // Return the block which contains give lifetime position.
- const InstructionBlock* GetInstructionBlock(LifetimePosition pos);
-
- // Helper methods for the fixed registers.
- int RegisterCount() const;
- static int FixedLiveRangeID(int index) { return -index - 1; }
- int FixedDoubleLiveRangeID(int index);
- LiveRange* FixedLiveRangeFor(int index);
- LiveRange* FixedDoubleLiveRangeFor(int index);
- LiveRange* LiveRangeFor(int index);
- GapInstruction* GetLastGap(const InstructionBlock* block);
-
- const char* RegisterName(int allocation_index);
-
- Instruction* InstructionAt(int index) { return code()->InstructionAt(index); }
-
- Frame* frame() const { return frame_; }
- const char* debug_name() const { return debug_name_; }
- const RegisterConfiguration* config() const { return config_; }
- ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
- ZoneVector<LiveRange*>& fixed_live_ranges() { return fixed_live_ranges_; }
- ZoneVector<LiveRange*>& fixed_double_live_ranges() {
- return fixed_double_live_ranges_;
- }
- ZoneVector<LiveRange*>& unhandled_live_ranges() {
- return unhandled_live_ranges_;
- }
- ZoneVector<LiveRange*>& active_live_ranges() { return active_live_ranges_; }
- ZoneVector<LiveRange*>& inactive_live_ranges() {
- return inactive_live_ranges_;
- }
- ZoneVector<LiveRange*>& reusable_slots() { return reusable_slots_; }
- ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
-
- struct PhiMapValue {
- PhiMapValue(PhiInstruction* phi, const InstructionBlock* block)
- : phi(phi), block(block) {}
- PhiInstruction* const phi;
- const InstructionBlock* const block;
- };
- typedef std::map<int, PhiMapValue, std::less<int>,
- zone_allocator<std::pair<int, PhiMapValue>>> PhiMap;
-
- Zone* const local_zone_;
- Frame* const frame_;
- InstructionSequence* const code_;
- const char* const debug_name_;
-
- const RegisterConfiguration* config_;
-
- PhiMap phi_map_;
-
- // During liveness analysis keep a mapping from block id to live_in sets
- // for blocks already analyzed.
- ZoneVector<BitVector*> live_in_sets_;
-
- // Liveness analysis results.
- ZoneVector<LiveRange*> live_ranges_;
-
- // Lists of live ranges
- ZoneVector<LiveRange*> fixed_live_ranges_;
- ZoneVector<LiveRange*> fixed_double_live_ranges_;
ZoneVector<LiveRange*> unhandled_live_ranges_;
ZoneVector<LiveRange*> active_live_ranges_;
ZoneVector<LiveRange*> inactive_live_ranges_;
- ZoneVector<LiveRange*> reusable_slots_;
- ZoneVector<SpillRange*> spill_ranges_;
-
- RegisterKind mode_;
- int num_registers_;
-
- BitVector* assigned_registers_;
- BitVector* assigned_double_registers_;
-
- // Indicates success or failure during register allocation.
- bool allocation_ok_;
#ifdef DEBUG
LifetimePosition allocation_finger_;
#endif
- DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
+ DISALLOW_COPY_AND_ASSIGN(LinearScanAllocator);
+};
+
+
+class SpillSlotLocator final : public ZoneObject {
+ public:
+ explicit SpillSlotLocator(RegisterAllocationData* data);
+
+ void LocateSpillSlots();
+
+ private:
+ RegisterAllocationData* data() const { return data_; }
+
+ RegisterAllocationData* const data_;
+
+ DISALLOW_COPY_AND_ASSIGN(SpillSlotLocator);
+};
+
+
+class OperandAssigner final : public ZoneObject {
+ public:
+ explicit OperandAssigner(RegisterAllocationData* data);
+
+  // Phase 5: assign spill slots.
+ void AssignSpillSlots();
+
+ // Phase 6: commit assignment.
+ void CommitAssignment();
+
+ private:
+ RegisterAllocationData* data() const { return data_; }
+
+ RegisterAllocationData* const data_;
+
+ DISALLOW_COPY_AND_ASSIGN(OperandAssigner);
+};
+
+
+class ReferenceMapPopulator final : public ZoneObject {
+ public:
+ explicit ReferenceMapPopulator(RegisterAllocationData* data);
+
+ // Phase 7: compute values for pointer maps.
+ void PopulateReferenceMaps();
+
+ private:
+ RegisterAllocationData* data() const { return data_; }
+
+ bool SafePointsAreInOrder() const;
+
+ RegisterAllocationData* const data_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReferenceMapPopulator);
+};
+
+
+// Insert moves of the form
+//
+// Operand(child_(k+1)) = Operand(child_k)
+//
+// where child_k and child_(k+1) are consecutive children of a range (so
+// child_k->next() == child_(k+1)), and Operand(...) refers to the
+// assigned operand, be it a register or a slot.
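+//
+// An illustrative sketch (not part of the original change): if child_k was
+// assigned register rax and child_(k+1) a spill slot, the connector emits a
+// gap move of the form [slot] = rax at the position where the two children
+// meet. The operand syntax here is schematic only.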
+class LiveRangeConnector final : public ZoneObject {
+ public:
+ explicit LiveRangeConnector(RegisterAllocationData* data);
+
+ // Phase 8: reconnect split ranges with moves, when the control flow
+ // between the ranges is trivial (no branches).
+ void ConnectRanges(Zone* local_zone);
+
+ // Phase 9: insert moves to connect ranges across basic blocks, when the
+ // control flow between them cannot be trivially resolved, such as joining
+ // branches.
+ void ResolveControlFlow(Zone* local_zone);
+
+ private:
+ RegisterAllocationData* data() const { return data_; }
+ InstructionSequence* code() const { return data()->code(); }
+ Zone* code_zone() const { return code()->zone(); }
+
+ bool CanEagerlyResolveControlFlow(const InstructionBlock* block) const;
+
+ int ResolveControlFlow(const InstructionBlock* block,
+ const InstructionOperand& cur_op,
+ const InstructionBlock* pred,
+ const InstructionOperand& pred_op);
+
+ RegisterAllocationData* const data_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeConnector);
};
} // namespace compiler
diff --git a/src/compiler/register-configuration.cc b/src/compiler/register-configuration.cc
deleted file mode 100644
index e7d8bbd..0000000
--- a/src/compiler/register-configuration.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/register-configuration.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
- Register::kNumRegisters);
-STATIC_ASSERT(RegisterConfiguration::kMaxDoubleRegisters >=
- DoubleRegister::kMaxNumRegisters);
-
-class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
- public:
- ArchDefaultRegisterConfiguration()
- : RegisterConfiguration(Register::kMaxNumAllocatableRegisters,
- DoubleRegister::kMaxNumAllocatableRegisters,
- DoubleRegister::NumAllocatableAliasedRegisters(),
- general_register_name_table_,
- double_register_name_table_) {
- DCHECK_EQ(Register::kMaxNumAllocatableRegisters,
- Register::NumAllocatableRegisters());
- for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
- general_register_name_table_[i] = Register::AllocationIndexToString(i);
- }
- for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
- double_register_name_table_[i] =
- DoubleRegister::AllocationIndexToString(i);
- }
- }
-
- const char*
- general_register_name_table_[Register::kMaxNumAllocatableRegisters];
- const char*
- double_register_name_table_[DoubleRegister::kMaxNumAllocatableRegisters];
-};
-
-
-static base::LazyInstance<ArchDefaultRegisterConfiguration>::type
- kDefaultRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
-
-} // namepace
-
-
-const RegisterConfiguration* RegisterConfiguration::ArchDefault() {
- return &kDefaultRegisterConfiguration.Get();
-}
-
-RegisterConfiguration::RegisterConfiguration(
- int num_general_registers, int num_double_registers,
- int num_aliased_double_registers, const char* const* general_register_names,
- const char* const* double_register_names)
- : num_general_registers_(num_general_registers),
- num_double_registers_(num_double_registers),
- num_aliased_double_registers_(num_aliased_double_registers),
- general_register_names_(general_register_names),
- double_register_names_(double_register_names) {}
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/src/compiler/register-configuration.h b/src/compiler/register-configuration.h
deleted file mode 100644
index 8178ba2..0000000
--- a/src/compiler/register-configuration.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_REGISTER_CONFIGURATION_H_
-#define V8_COMPILER_REGISTER_CONFIGURATION_H_
-
-#include "src/v8.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// An architecture independent representation of the sets of registers available
-// for instruction creation.
-class RegisterConfiguration {
- public:
- // Architecture independent maxes.
- static const int kMaxGeneralRegisters = 32;
- static const int kMaxDoubleRegisters = 32;
-
- static const RegisterConfiguration* ArchDefault();
-
- RegisterConfiguration(int num_general_registers, int num_double_registers,
- int num_aliased_double_registers,
- const char* const* general_register_name,
- const char* const* double_register_name);
-
- int num_general_registers() const { return num_general_registers_; }
- int num_double_registers() const { return num_double_registers_; }
- int num_aliased_double_registers() const {
- return num_aliased_double_registers_;
- }
-
- const char* general_register_name(int offset) const {
- DCHECK(offset >= 0 && offset < kMaxGeneralRegisters);
- return general_register_names_[offset];
- }
- const char* double_register_name(int offset) const {
- DCHECK(offset >= 0 && offset < kMaxDoubleRegisters);
- return double_register_names_[offset];
- }
-
- private:
- const int num_general_registers_;
- const int num_double_registers_;
- const int num_aliased_double_registers_;
- const char* const* general_register_names_;
- const char* const* double_register_names_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_REGISTER_CONFIGURATION_H_
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
new file mode 100644
index 0000000..5dab60f
--- /dev/null
+++ b/src/compiler/representation-change.cc
@@ -0,0 +1,537 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/representation-change.h"
+
+#include <sstream>
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/compiler/machine-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+const char* Truncation::description() const {
+ switch (kind()) {
+ case TruncationKind::kNone:
+ return "no-value-use";
+ case TruncationKind::kBool:
+ return "truncate-to-bool";
+ case TruncationKind::kWord32:
+ return "truncate-to-word32";
+ case TruncationKind::kWord64:
+ return "truncate-to-word64";
+ case TruncationKind::kFloat32:
+ return "truncate-to-float32";
+ case TruncationKind::kFloat64:
+ return "truncate-to-float64";
+ case TruncationKind::kAny:
+ return "no-truncation";
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+// Partial order for truncations:
+//
+// kWord64 kAny
+// ^ ^
+// \ |
+// \ kFloat64 <--+
+// \ ^ ^ |
+// \ / | |
+// kWord32 kFloat32 kBool
+// ^ ^ ^
+// \ | /
+// \ | /
+// \ | /
+// \ | /
+// \ | /
+// kNone
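+//
+// For example, Generalize(kWord32, kFloat32) yields kFloat64, the least
+// truncation above both; kWord64 and kBool have no common upper bound in
+// this order, so combining them is a fatal error (see Generalize below).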
+
+// static
+Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1,
+ TruncationKind rep2) {
+ if (LessGeneral(rep1, rep2)) return rep2;
+ if (LessGeneral(rep2, rep1)) return rep1;
+ // Handle the generalization of float64-representable values.
+ if (LessGeneral(rep1, TruncationKind::kFloat64) &&
+ LessGeneral(rep2, TruncationKind::kFloat64)) {
+ return TruncationKind::kFloat64;
+ }
+ // All other combinations are illegal.
+ FATAL("Tried to combine incompatible truncations");
+ return TruncationKind::kNone;
+}
+
+
+// static
+bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
+ switch (rep1) {
+ case TruncationKind::kNone:
+ return true;
+ case TruncationKind::kBool:
+ return rep2 == TruncationKind::kBool || rep2 == TruncationKind::kAny;
+ case TruncationKind::kWord32:
+ return rep2 == TruncationKind::kWord32 ||
+ rep2 == TruncationKind::kWord64 ||
+ rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
+ case TruncationKind::kWord64:
+ return rep2 == TruncationKind::kWord64;
+ case TruncationKind::kFloat32:
+ return rep2 == TruncationKind::kFloat32 ||
+ rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
+ case TruncationKind::kFloat64:
+ return rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
+ case TruncationKind::kAny:
+ return rep2 == TruncationKind::kAny;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+namespace {
+
+// TODO(titzer): should Word64 also be implicitly convertible to others?
+bool IsWord(MachineRepresentation rep) {
+ return rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kWord16 ||
+ rep == MachineRepresentation::kWord32;
+}
+
+} // namespace
+
+
+// Changes representation from {output_rep} to {use_rep}. The {truncation}
+// parameter is only used for sanity checking - if the changer cannot figure
+// out signedness for the word32->float64 conversion, then we check that the
+// uses truncate to word32 (so they do not care about signedness).
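+//
+// Illustration (restating the contract above, not new behavior): a kWord32
+// output whose type is neither Signed32 nor Unsigned32 could be interpreted
+// either way, and int32->float64 disagrees with uint32->float64 on values
+// with the sign bit set; such a change is therefore only safe if every use
+// truncates back to word32.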
+Node* RepresentationChanger::GetRepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ MachineRepresentation use_rep, Truncation truncation) {
+ if (output_rep == MachineRepresentation::kNone) {
+ // The output representation should be set.
+ return TypeError(node, output_rep, output_type, use_rep);
+ }
+ if (use_rep == output_rep) {
+ // Representations are the same. That's a no-op.
+ return node;
+ }
+ if (IsWord(use_rep) && IsWord(output_rep)) {
+ // Both are words less than or equal to 32-bits.
+ // Since loads of integers from memory implicitly sign or zero extend the
+ // value to the full machine word size and stores implicitly truncate,
+ // no representation change is necessary.
+ return node;
+ }
+ switch (use_rep) {
+ case MachineRepresentation::kTagged:
+ return GetTaggedRepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kFloat32:
+ return GetFloat32RepresentationFor(node, output_rep, output_type,
+ truncation);
+ case MachineRepresentation::kFloat64:
+ return GetFloat64RepresentationFor(node, output_rep, output_type,
+ truncation);
+ case MachineRepresentation::kBit:
+ return GetBitRepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return GetWord32RepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kWord64:
+ return GetWord64RepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kNone:
+ return node;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+Node* RepresentationChanger::GetTaggedRepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kHeapConstant:
+ return node; // No change necessary.
+ case IrOpcode::kInt32Constant:
+ if (output_type->Is(Type::Signed32())) {
+ int32_t value = OpParameter<int32_t>(node);
+ return jsgraph()->Constant(value);
+ } else if (output_type->Is(Type::Unsigned32())) {
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ return jsgraph()->Constant(static_cast<double>(value));
+ } else if (output_rep == MachineRepresentation::kBit) {
+ return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
+ : jsgraph()->TrueConstant();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
+ }
+ case IrOpcode::kFloat64Constant:
+ return jsgraph()->Constant(OpParameter<double>(node));
+ case IrOpcode::kFloat32Constant:
+ return jsgraph()->Constant(OpParameter<float>(node));
+ default:
+ break;
+ }
+ // Select the correct X -> Tagged operator.
+ const Operator* op;
+ if (output_rep == MachineRepresentation::kBit) {
+ op = simplified()->ChangeBitToBool();
+ } else if (IsWord(output_rep)) {
+ if (output_type->Is(Type::Unsigned32())) {
+ op = simplified()->ChangeUint32ToTagged();
+ } else if (output_type->Is(Type::Signed32())) {
+ op = simplified()->ChangeInt32ToTagged();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
+ }
+ } else if (output_rep ==
+ MachineRepresentation::kFloat32) { // float32 -> float64 -> tagged
+ node = InsertChangeFloat32ToFloat64(node);
+ op = simplified()->ChangeFloat64ToTagged();
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ op = simplified()->ChangeFloat64ToTagged();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
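+// (Example: an Int32Constant with bit pattern 0x80000000 typed Unsigned32
+// folds above to the NumberConstant 2147483648, while the same bits typed
+// Signed32 fold to -2147483648.)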
+
+
+Node* RepresentationChanger::GetFloat32RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ Truncation truncation) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kNumberConstant:
+ return jsgraph()->Float32Constant(
+ DoubleToFloat32(OpParameter<double>(node)));
+ case IrOpcode::kInt32Constant:
+ if (output_type->Is(Type::Unsigned32())) {
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ return jsgraph()->Float32Constant(static_cast<float>(value));
+ } else {
+ int32_t value = OpParameter<int32_t>(node);
+ return jsgraph()->Float32Constant(static_cast<float>(value));
+ }
+ case IrOpcode::kFloat32Constant:
+ return node; // No change necessary.
+ default:
+ break;
+ }
+ // Select the correct X -> Float32 operator.
+ const Operator* op;
+ if (output_rep == MachineRepresentation::kBit) {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kFloat32);
+ } else if (IsWord(output_rep)) {
+ if (output_type->Is(Type::Signed32())) {
+ op = machine()->ChangeInt32ToFloat64();
+ } else {
+ // Either the output is int32 or the uses only care about the
+ // low 32 bits (so we can pick int32 safely).
+ DCHECK(output_type->Is(Type::Unsigned32()) ||
+ truncation.TruncatesToWord32());
+ op = machine()->ChangeUint32ToFloat64();
+ }
+ // int32 -> float64 -> float32
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = machine()->TruncateFloat64ToFloat32();
+ } else if (output_rep == MachineRepresentation::kTagged) {
+ op = simplified()->ChangeTaggedToFloat64(); // tagged -> float64 -> float32
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = machine()->TruncateFloat64ToFloat32();
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ op = machine()->TruncateFloat64ToFloat32();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kFloat32);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
+
+
+Node* RepresentationChanger::GetFloat64RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ Truncation truncation) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kNumberConstant:
+ return jsgraph()->Float64Constant(OpParameter<double>(node));
+ case IrOpcode::kInt32Constant:
+ if (output_type->Is(Type::Signed32())) {
+ int32_t value = OpParameter<int32_t>(node);
+ return jsgraph()->Float64Constant(value);
+ } else {
+ DCHECK(output_type->Is(Type::Unsigned32()));
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ return jsgraph()->Float64Constant(static_cast<double>(value));
+ }
+ case IrOpcode::kFloat64Constant:
+ return node; // No change necessary.
+ case IrOpcode::kFloat32Constant:
+ return jsgraph()->Float64Constant(OpParameter<float>(node));
+ default:
+ break;
+ }
+ // Select the correct X -> Float64 operator.
+ const Operator* op;
+ if (output_rep == MachineRepresentation::kBit) {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kFloat64);
+ } else if (IsWord(output_rep)) {
+ if (output_type->Is(Type::Signed32())) {
+ op = machine()->ChangeInt32ToFloat64();
+ } else {
+ // Either the output is int32 or the uses only care about the
+ // low 32 bits (so we can pick int32 safely).
+ DCHECK(output_type->Is(Type::Unsigned32()) ||
+ truncation.TruncatesToWord32());
+ op = machine()->ChangeUint32ToFloat64();
+ }
+ } else if (output_rep == MachineRepresentation::kTagged) {
+ op = simplified()->ChangeTaggedToFloat64();
+ } else if (output_rep == MachineRepresentation::kFloat32) {
+ op = machine()->ChangeFloat32ToFloat64();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kFloat64);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
+
+
+Node* RepresentationChanger::MakeTruncatedInt32Constant(double value) {
+ return jsgraph()->Int32Constant(DoubleToInt32(value));
+}
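+// (Example: DoubleToInt32 implements the ECMAScript ToInt32 truncation, so
+// MakeTruncatedInt32Constant(4294967296.0) and a NaN input both produce
+// Int32Constant(0).)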
+
+
+Node* RepresentationChanger::GetWord32RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ return node; // No change necessary.
+ case IrOpcode::kFloat32Constant:
+ return MakeTruncatedInt32Constant(OpParameter<float>(node));
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kFloat64Constant:
+ return MakeTruncatedInt32Constant(OpParameter<double>(node));
+ default:
+ break;
+ }
+ // Select the correct X -> Word32 operator.
+ const Operator* op;
+ Type* type = NodeProperties::GetType(node);
+
+ if (output_rep == MachineRepresentation::kBit) {
+ return node; // Sloppy comparison -> word32
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ // TODO(jarin) Use only output_type here, once we intersect it with the
+ // type inferred by the typer.
+ if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else if (output_type->Is(Type::Signed32()) ||
+ type->Is(Type::Signed32())) {
+ op = machine()->ChangeFloat64ToInt32();
+ } else {
+ op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ }
+ } else if (output_rep == MachineRepresentation::kFloat32) {
+ node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
+ if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else if (output_type->Is(Type::Signed32()) ||
+ type->Is(Type::Signed32())) {
+ op = machine()->ChangeFloat64ToInt32();
+ } else {
+ op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ }
+ } else if (output_rep == MachineRepresentation::kTagged) {
+ if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+ op = simplified()->ChangeTaggedToUint32();
+ } else if (output_type->Is(Type::Signed32()) ||
+ type->Is(Type::Signed32())) {
+ op = simplified()->ChangeTaggedToInt32();
+ } else {
+ node = InsertChangeTaggedToFloat64(node);
+ op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ }
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord32);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
+
+
+Node* RepresentationChanger::GetBitRepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
+ DCHECK(value.is_identical_to(factory()->true_value()) ||
+ value.is_identical_to(factory()->false_value()));
+ return jsgraph()->Int32Constant(
+ value.is_identical_to(factory()->true_value()) ? 1 : 0);
+ }
+ default:
+ break;
+ }
+ // Select the correct X -> Bit operator.
+ const Operator* op;
+ if (output_rep == MachineRepresentation::kTagged) {
+ op = simplified()->ChangeBoolToBit();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kBit);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
+
+
+Node* RepresentationChanger::GetWord64RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type) {
+ if (output_rep == MachineRepresentation::kBit) {
+ return node; // Sloppy comparison -> word64
+ }
+ // Can't really convert Word64 to anything else. Purported to be internal.
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord64);
+}
+
+
+const Operator* RepresentationChanger::Int32OperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kNumberAdd:
+ return machine()->Int32Add();
+ case IrOpcode::kNumberSubtract:
+ return machine()->Int32Sub();
+ case IrOpcode::kNumberMultiply:
+ return machine()->Int32Mul();
+ case IrOpcode::kNumberDivide:
+ return machine()->Int32Div();
+ case IrOpcode::kNumberModulus:
+ return machine()->Int32Mod();
+ case IrOpcode::kNumberBitwiseOr:
+ return machine()->Word32Or();
+ case IrOpcode::kNumberBitwiseXor:
+ return machine()->Word32Xor();
+ case IrOpcode::kNumberBitwiseAnd:
+ return machine()->Word32And();
+ case IrOpcode::kNumberEqual:
+ return machine()->Word32Equal();
+ case IrOpcode::kNumberLessThan:
+ return machine()->Int32LessThan();
+ case IrOpcode::kNumberLessThanOrEqual:
+ return machine()->Int32LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+const Operator* RepresentationChanger::Uint32OperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kNumberAdd:
+ return machine()->Int32Add();
+ case IrOpcode::kNumberSubtract:
+ return machine()->Int32Sub();
+ case IrOpcode::kNumberMultiply:
+ return machine()->Int32Mul();
+ case IrOpcode::kNumberDivide:
+ return machine()->Uint32Div();
+ case IrOpcode::kNumberModulus:
+ return machine()->Uint32Mod();
+ case IrOpcode::kNumberEqual:
+ return machine()->Word32Equal();
+ case IrOpcode::kNumberLessThan:
+ return machine()->Uint32LessThan();
+ case IrOpcode::kNumberLessThanOrEqual:
+ return machine()->Uint32LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
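+// (Note, added for clarity: NumberAdd/Subtract/Multiply map to the same Int32
+// machine operators in both tables above because two's-complement addition,
+// subtraction and multiplication are identical for signed and unsigned word32
+// values; only division, modulus and the ordering comparisons need unsigned
+// variants.)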
+
+
+const Operator* RepresentationChanger::Float64OperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kNumberAdd:
+ return machine()->Float64Add();
+ case IrOpcode::kNumberSubtract:
+ return machine()->Float64Sub();
+ case IrOpcode::kNumberMultiply:
+ return machine()->Float64Mul();
+ case IrOpcode::kNumberDivide:
+ return machine()->Float64Div();
+ case IrOpcode::kNumberModulus:
+ return machine()->Float64Mod();
+ case IrOpcode::kNumberEqual:
+ return machine()->Float64Equal();
+ case IrOpcode::kNumberLessThan:
+ return machine()->Float64LessThan();
+ case IrOpcode::kNumberLessThanOrEqual:
+ return machine()->Float64LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+Node* RepresentationChanger::TypeError(Node* node,
+ MachineRepresentation output_rep,
+ Type* output_type,
+ MachineRepresentation use) {
+ type_error_ = true;
+ if (!testing_type_errors_) {
+ std::ostringstream out_str;
+ out_str << output_rep << " (";
+ output_type->PrintTo(out_str, Type::SEMANTIC_DIM);
+ out_str << ")";
+
+ std::ostringstream use_str;
+ use_str << use;
+
+ V8_Fatal(__FILE__, __LINE__,
+ "RepresentationChangerError: node #%d:%s of "
+ "%s cannot be changed to %s",
+ node->id(), node->op()->mnemonic(), out_str.str().c_str(),
+ use_str.str().c_str());
+ }
+ return node;
+}
+
+
+Node* RepresentationChanger::InsertChangeFloat32ToFloat64(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(), node);
+}
+
+
+Node* RepresentationChanger::InsertChangeTaggedToFloat64(Node* node) {
+ return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
+ node);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index 8720afd..62ea3b4 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -5,429 +5,106 @@
#ifndef V8_COMPILER_REPRESENTATION_CHANGE_H_
#define V8_COMPILER_REPRESENTATION_CHANGE_H_
-#include <sstream>
-
-#include "src/base/bits.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
namespace compiler {
+class Truncation final {
+ public:
+ // Constructors.
+ static Truncation None() { return Truncation(TruncationKind::kNone); }
+ static Truncation Bool() { return Truncation(TruncationKind::kBool); }
+ static Truncation Word32() { return Truncation(TruncationKind::kWord32); }
+ static Truncation Word64() { return Truncation(TruncationKind::kWord64); }
+ static Truncation Float32() { return Truncation(TruncationKind::kFloat32); }
+ static Truncation Float64() { return Truncation(TruncationKind::kFloat64); }
+ static Truncation Any() { return Truncation(TruncationKind::kAny); }
+
+ static Truncation Generalize(Truncation t1, Truncation t2) {
+ return Truncation(Generalize(t1.kind(), t2.kind()));
+ }
+
+ // Queries.
+ bool TruncatesToWord32() const {
+ return LessGeneral(kind_, TruncationKind::kWord32);
+ }
+ bool TruncatesNaNToZero() {
+ return LessGeneral(kind_, TruncationKind::kWord32) ||
+ LessGeneral(kind_, TruncationKind::kBool);
+ }
+ bool TruncatesUndefinedToZeroOrNaN() {
+ return LessGeneral(kind_, TruncationKind::kFloat64) ||
+ LessGeneral(kind_, TruncationKind::kWord64);
+ }
+
+ // Operators.
+ bool operator==(Truncation other) const { return kind() == other.kind(); }
+ bool operator!=(Truncation other) const { return !(*this == other); }
+
+ // Debug utilities.
+ const char* description() const;
+ bool IsLessGeneralThan(Truncation other) {
+ return LessGeneral(kind(), other.kind());
+ }
+
+ private:
+ enum class TruncationKind : uint8_t {
+ kNone,
+ kBool,
+ kWord32,
+ kWord64,
+ kFloat32,
+ kFloat64,
+ kAny
+ };
+
+ explicit Truncation(TruncationKind kind) : kind_(kind) {}
+ TruncationKind kind() const { return kind_; }
+
+ TruncationKind kind_;
+
+ static TruncationKind Generalize(TruncationKind rep1, TruncationKind rep2);
+ static bool LessGeneral(TruncationKind rep1, TruncationKind rep2);
+};
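+
+// Usage sketch (hypothetical call site, for illustration only):
+//
+//   Truncation t = Truncation::Generalize(Truncation::Word32(),
+//                                         Truncation::Float32());
+//   DCHECK(t == Truncation::Float64());
+//   DCHECK(Truncation::Word32().TruncatesNaNToZero());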
+
+
// Contains logic related to changing the representation of values for constants
// and other nodes, as well as lowering Simplified->Machine operators.
// Eagerly folds any representation changes for constants.
-class RepresentationChanger {
+class RepresentationChanger final {
public:
- RepresentationChanger(JSGraph* jsgraph, SimplifiedOperatorBuilder* simplified,
- Isolate* isolate)
+ RepresentationChanger(JSGraph* jsgraph, Isolate* isolate)
: jsgraph_(jsgraph),
- simplified_(simplified),
isolate_(isolate),
testing_type_errors_(false),
type_error_(false) {}
- // TODO(titzer): should Word64 also be implicitly convertable to others?
- static const MachineTypeUnion rWord =
- kRepBit | kRepWord8 | kRepWord16 | kRepWord32;
-
- Node* GetRepresentationFor(Node* node, MachineTypeUnion output_type,
- MachineTypeUnion use_type) {
- if (!base::bits::IsPowerOfTwo32(output_type & kRepMask)) {
- // There should be only one output representation.
- return TypeError(node, output_type, use_type);
- }
- if ((use_type & kRepMask) == (output_type & kRepMask)) {
- // Representations are the same. That's a no-op.
- return node;
- }
- if ((use_type & rWord) && (output_type & rWord)) {
- // Both are words less than or equal to 32-bits.
- // Since loads of integers from memory implicitly sign or zero extend the
- // value to the full machine word size and stores implicitly truncate,
- // no representation change is necessary.
- return node;
- }
- if (use_type & kRepTagged) {
- return GetTaggedRepresentationFor(node, output_type);
- } else if (use_type & kRepFloat32) {
- return GetFloat32RepresentationFor(node, output_type);
- } else if (use_type & kRepFloat64) {
- return GetFloat64RepresentationFor(node, output_type);
- } else if (use_type & kRepBit) {
- return GetBitRepresentationFor(node, output_type);
- } else if (use_type & rWord) {
- return GetWord32RepresentationFor(node, output_type,
- use_type & kTypeUint32);
- } else if (use_type & kRepWord64) {
- return GetWord64RepresentationFor(node, output_type);
- } else {
- return node;
- }
- }
-
- Node* GetTaggedRepresentationFor(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kNumberConstant:
- case IrOpcode::kHeapConstant:
- return node; // No change necessary.
- case IrOpcode::kInt32Constant:
- if (output_type & kTypeUint32) {
- uint32_t value = OpParameter<uint32_t>(node);
- return jsgraph()->Constant(static_cast<double>(value));
- } else if (output_type & kTypeInt32) {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Constant(value);
- } else if (output_type & kRepBit) {
- return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
- : jsgraph()->TrueConstant();
- } else {
- return TypeError(node, output_type, kRepTagged);
- }
- case IrOpcode::kFloat64Constant:
- return jsgraph()->Constant(OpParameter<double>(node));
- case IrOpcode::kFloat32Constant:
- return jsgraph()->Constant(OpParameter<float>(node));
- default:
- break;
- }
- // Select the correct X -> Tagged operator.
- const Operator* op;
- if (output_type & kRepBit) {
- op = simplified()->ChangeBitToBool();
- } else if (output_type & rWord) {
- if (output_type & kTypeUint32) {
- op = simplified()->ChangeUint32ToTagged();
- } else if (output_type & kTypeInt32) {
- op = simplified()->ChangeInt32ToTagged();
- } else {
- return TypeError(node, output_type, kRepTagged);
- }
- } else if (output_type & kRepFloat32) { // float32 -> float64 -> tagged
- node = InsertChangeFloat32ToFloat64(node);
- op = simplified()->ChangeFloat64ToTagged();
- } else if (output_type & kRepFloat64) {
- op = simplified()->ChangeFloat64ToTagged();
- } else {
- return TypeError(node, output_type, kRepTagged);
- }
- return jsgraph()->graph()->NewNode(op, node);
- }
-
- Node* GetFloat32RepresentationFor(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kFloat64Constant:
- case IrOpcode::kNumberConstant:
- return jsgraph()->Float32Constant(
- DoubleToFloat32(OpParameter<double>(node)));
- case IrOpcode::kInt32Constant:
- if (output_type & kTypeUint32) {
- uint32_t value = OpParameter<uint32_t>(node);
- return jsgraph()->Float32Constant(static_cast<float>(value));
- } else {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Float32Constant(static_cast<float>(value));
- }
- case IrOpcode::kFloat32Constant:
- return node; // No change necessary.
- default:
- break;
- }
- // Select the correct X -> Float32 operator.
- const Operator* op;
- if (output_type & kRepBit) {
- return TypeError(node, output_type, kRepFloat32);
- } else if (output_type & rWord) {
- if (output_type & kTypeUint32) {
- op = machine()->ChangeUint32ToFloat64();
- } else {
- op = machine()->ChangeInt32ToFloat64();
- }
- // int32 -> float64 -> float32
- node = jsgraph()->graph()->NewNode(op, node);
- op = machine()->TruncateFloat64ToFloat32();
- } else if (output_type & kRepTagged) {
- op = simplified()
- ->ChangeTaggedToFloat64(); // tagged -> float64 -> float32
- node = jsgraph()->graph()->NewNode(op, node);
- op = machine()->TruncateFloat64ToFloat32();
- } else if (output_type & kRepFloat64) {
- op = machine()->TruncateFloat64ToFloat32();
- } else {
- return TypeError(node, output_type, kRepFloat32);
- }
- return jsgraph()->graph()->NewNode(op, node);
- }
-
- Node* GetFloat64RepresentationFor(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kNumberConstant:
- return jsgraph()->Float64Constant(OpParameter<double>(node));
- case IrOpcode::kInt32Constant:
- if (output_type & kTypeUint32) {
- uint32_t value = OpParameter<uint32_t>(node);
- return jsgraph()->Float64Constant(static_cast<double>(value));
- } else {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Float64Constant(value);
- }
- case IrOpcode::kFloat64Constant:
- return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return jsgraph()->Float64Constant(OpParameter<float>(node));
- default:
- break;
- }
- // Select the correct X -> Float64 operator.
- const Operator* op;
- if (output_type & kRepBit) {
- return TypeError(node, output_type, kRepFloat64);
- } else if (output_type & rWord) {
- if (output_type & kTypeUint32) {
- op = machine()->ChangeUint32ToFloat64();
- } else {
- op = machine()->ChangeInt32ToFloat64();
- }
- } else if (output_type & kRepTagged) {
- op = simplified()->ChangeTaggedToFloat64();
- } else if (output_type & kRepFloat32) {
- op = machine()->ChangeFloat32ToFloat64();
- } else {
- return TypeError(node, output_type, kRepFloat64);
- }
- return jsgraph()->graph()->NewNode(op, node);
- }
-
- Node* MakeInt32Constant(double value) {
- if (value < 0) {
- DCHECK(IsInt32Double(value));
- int32_t iv = static_cast<int32_t>(value);
- return jsgraph()->Int32Constant(iv);
- } else {
- DCHECK(IsUint32Double(value));
- int32_t iv = static_cast<int32_t>(static_cast<uint32_t>(value));
- return jsgraph()->Int32Constant(iv);
- }
- }
-
- Node* GetTruncatedWord32For(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold truncations for constants.
- switch (node->opcode()) {
- case IrOpcode::kInt32Constant:
- return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return jsgraph()->Int32Constant(
- DoubleToInt32(OpParameter<float>(node)));
- case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant:
- return jsgraph()->Int32Constant(
- DoubleToInt32(OpParameter<double>(node)));
- default:
- break;
- }
- // Select the correct X -> Word32 truncation operator.
- const Operator* op = NULL;
- if (output_type & kRepFloat64) {
- op = machine()->TruncateFloat64ToInt32();
- } else if (output_type & kRepFloat32) {
- node = InsertChangeFloat32ToFloat64(node);
- op = machine()->TruncateFloat64ToInt32();
- } else if (output_type & kRepTagged) {
- node = InsertChangeTaggedToFloat64(node);
- op = machine()->TruncateFloat64ToInt32();
- } else {
- return TypeError(node, output_type, kRepWord32);
- }
- return jsgraph()->graph()->NewNode(op, node);
- }
-
- Node* GetWord32RepresentationFor(Node* node, MachineTypeUnion output_type,
- bool use_unsigned) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kInt32Constant:
- return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return MakeInt32Constant(OpParameter<float>(node));
- case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant:
- return MakeInt32Constant(OpParameter<double>(node));
- default:
- break;
- }
- // Select the correct X -> Word32 operator.
- const Operator* op = NULL;
- if (output_type & kRepFloat64) {
- if (output_type & kTypeUint32 || use_unsigned) {
- op = machine()->ChangeFloat64ToUint32();
- } else {
- op = machine()->ChangeFloat64ToInt32();
- }
- } else if (output_type & kRepFloat32) {
- node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
- if (output_type & kTypeUint32 || use_unsigned) {
- op = machine()->ChangeFloat64ToUint32();
- } else {
- op = machine()->ChangeFloat64ToInt32();
- }
- } else if (output_type & kRepTagged) {
- if (output_type & kTypeUint32 || use_unsigned) {
- op = simplified()->ChangeTaggedToUint32();
- } else {
- op = simplified()->ChangeTaggedToInt32();
- }
- } else {
- return TypeError(node, output_type, kRepWord32);
- }
- return jsgraph()->graph()->NewNode(op, node);
- }
-
- Node* GetBitRepresentationFor(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kInt32Constant: {
- int32_t value = OpParameter<int32_t>(node);
- if (value == 0 || value == 1) return node;
- return jsgraph()->Int32Constant(1); // value != 0
- }
- case IrOpcode::kNumberConstant: {
- double value = OpParameter<double>(node);
- if (std::isnan(value) || value == 0.0) {
- return jsgraph()->Int32Constant(0);
- }
- return jsgraph()->Int32Constant(1);
- }
- case IrOpcode::kHeapConstant: {
- Handle<Object> handle = OpParameter<Unique<Object> >(node).handle();
- DCHECK(*handle == isolate()->heap()->true_value() ||
- *handle == isolate()->heap()->false_value());
- return jsgraph()->Int32Constant(
- *handle == isolate()->heap()->true_value() ? 1 : 0);
- }
- default:
- break;
- }
- // Select the correct X -> Bit operator.
- const Operator* op;
- if (output_type & rWord) {
- return node; // No change necessary.
- } else if (output_type & kRepWord64) {
- return node; // TODO(titzer): No change necessary, on 64-bit.
- } else if (output_type & kRepTagged) {
- op = simplified()->ChangeBoolToBit();
- } else {
- return TypeError(node, output_type, kRepBit);
- }
- return jsgraph()->graph()->NewNode(op, node);
- }
-
- Node* GetWord64RepresentationFor(Node* node, MachineTypeUnion output_type) {
- if (output_type & kRepBit) {
- return node; // Sloppy comparison -> word64
- }
- // Can't really convert Word64 to anything else. Purported to be internal.
- return TypeError(node, output_type, kRepWord64);
- }
-
- const Operator* Int32OperatorFor(IrOpcode::Value opcode) {
- switch (opcode) {
- case IrOpcode::kNumberAdd:
- return machine()->Int32Add();
- case IrOpcode::kNumberSubtract:
- return machine()->Int32Sub();
- case IrOpcode::kNumberMultiply:
- return machine()->Int32Mul();
- case IrOpcode::kNumberDivide:
- return machine()->Int32Div();
- case IrOpcode::kNumberModulus:
- return machine()->Int32Mod();
- case IrOpcode::kNumberEqual:
- return machine()->Word32Equal();
- case IrOpcode::kNumberLessThan:
- return machine()->Int32LessThan();
- case IrOpcode::kNumberLessThanOrEqual:
- return machine()->Int32LessThanOrEqual();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-
- const Operator* Uint32OperatorFor(IrOpcode::Value opcode) {
- switch (opcode) {
- case IrOpcode::kNumberAdd:
- return machine()->Int32Add();
- case IrOpcode::kNumberSubtract:
- return machine()->Int32Sub();
- case IrOpcode::kNumberMultiply:
- return machine()->Int32Mul();
- case IrOpcode::kNumberDivide:
- return machine()->Uint32Div();
- case IrOpcode::kNumberModulus:
- return machine()->Uint32Mod();
- case IrOpcode::kNumberEqual:
- return machine()->Word32Equal();
- case IrOpcode::kNumberLessThan:
- return machine()->Uint32LessThan();
- case IrOpcode::kNumberLessThanOrEqual:
- return machine()->Uint32LessThanOrEqual();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-
- const Operator* Float64OperatorFor(IrOpcode::Value opcode) {
- switch (opcode) {
- case IrOpcode::kNumberAdd:
- return machine()->Float64Add();
- case IrOpcode::kNumberSubtract:
- return machine()->Float64Sub();
- case IrOpcode::kNumberMultiply:
- return machine()->Float64Mul();
- case IrOpcode::kNumberDivide:
- return machine()->Float64Div();
- case IrOpcode::kNumberModulus:
- return machine()->Float64Mod();
- case IrOpcode::kNumberEqual:
- return machine()->Float64Equal();
- case IrOpcode::kNumberLessThan:
- return machine()->Float64LessThan();
- case IrOpcode::kNumberLessThanOrEqual:
- return machine()->Float64LessThanOrEqual();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
+  // Changes representation from {output_rep} to {use_rep}. The {truncation}
+ // parameter is only used for sanity checking - if the changer cannot figure
+ // out signedness for the word32->float64 conversion, then we check that the
+ // uses truncate to word32 (so they do not care about signedness).
+ Node* GetRepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type, MachineRepresentation use_rep,
+ Truncation truncation = Truncation::None());
+ const Operator* Int32OperatorFor(IrOpcode::Value opcode);
+ const Operator* Uint32OperatorFor(IrOpcode::Value opcode);
+ const Operator* Float64OperatorFor(IrOpcode::Value opcode);
MachineType TypeForBasePointer(const FieldAccess& access) {
- return access.tag() != 0 ? kMachAnyTagged : kMachPtr;
+ return access.tag() != 0 ? MachineType::AnyTagged()
+ : MachineType::Pointer();
}
MachineType TypeForBasePointer(const ElementAccess& access) {
- return access.tag() != 0 ? kMachAnyTagged : kMachPtr;
- }
-
- MachineType TypeFromUpperBound(Type* type) {
- if (type->Is(Type::None()))
- return kTypeAny; // TODO(titzer): should be an error
- if (type->Is(Type::Signed32())) return kTypeInt32;
- if (type->Is(Type::Unsigned32())) return kTypeUint32;
- if (type->Is(Type::Number())) return kTypeNumber;
- if (type->Is(Type::Boolean())) return kTypeBool;
- return kTypeAny;
+ return access.tag() != 0 ? MachineType::AnyTagged()
+ : MachineType::Pointer();
}
private:
JSGraph* jsgraph_;
- SimplifiedOperatorBuilder* simplified_;
Isolate* isolate_;
friend class RepresentationChangerTester; // accesses the below fields.
@@ -435,38 +112,30 @@
bool testing_type_errors_; // If {true}, don't abort on a type error.
bool type_error_; // Set when a type error is detected.
- Node* TypeError(Node* node, MachineTypeUnion output_type,
- MachineTypeUnion use) {
- type_error_ = true;
- if (!testing_type_errors_) {
- std::ostringstream out_str;
- out_str << static_cast<MachineType>(output_type);
+ Node* GetTaggedRepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type);
+ Node* GetFloat32RepresentationFor(Node* node,
+ MachineRepresentation output_rep,
+ Type* output_type, Truncation truncation);
+ Node* GetFloat64RepresentationFor(Node* node,
+ MachineRepresentation output_rep,
+ Type* output_type, Truncation truncation);
+ Node* GetWord32RepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type);
+ Node* GetBitRepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type);
+ Node* GetWord64RepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type);
+ Node* TypeError(Node* node, MachineRepresentation output_rep,
+ Type* output_type, MachineRepresentation use);
+ Node* MakeTruncatedInt32Constant(double value);
+ Node* InsertChangeFloat32ToFloat64(Node* node);
+ Node* InsertChangeTaggedToFloat64(Node* node);
- std::ostringstream use_str;
- use_str << static_cast<MachineType>(use);
-
- V8_Fatal(__FILE__, __LINE__,
- "RepresentationChangerError: node #%d:%s of "
- "%s cannot be changed to %s",
- node->id(), node->op()->mnemonic(), out_str.str().c_str(),
- use_str.str().c_str());
- }
- return node;
- }
-
- Node* InsertChangeFloat32ToFloat64(Node* node) {
- return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(),
- node);
- }
-
- Node* InsertChangeTaggedToFloat64(Node* node) {
- return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
- node);
- }
-
- JSGraph* jsgraph() { return jsgraph_; }
- Isolate* isolate() { return isolate_; }
- SimplifiedOperatorBuilder* simplified() { return simplified_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const { return isolate_; }
+ Factory* factory() const { return isolate()->factory(); }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
};
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
index 30bfbc8..455fcd1 100644
--- a/src/compiler/schedule.cc
+++ b/src/compiler/schedule.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/schedule.h"
+
#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/compiler/schedule.h"
#include "src/ostreams.h"
namespace v8 {
@@ -17,13 +17,13 @@
rpo_number_(-1),
deferred_(false),
dominator_depth_(-1),
- dominator_(NULL),
- rpo_next_(NULL),
- loop_header_(NULL),
- loop_end_(NULL),
+ dominator_(nullptr),
+ rpo_next_(nullptr),
+ loop_header_(nullptr),
+ loop_end_(nullptr),
loop_depth_(0),
control_(kNone),
- control_input_(NULL),
+ control_input_(nullptr),
nodes_(zone),
successors_(zone),
predecessors_(zone),
@@ -34,7 +34,7 @@
// RPO numbers must be initialized.
DCHECK(rpo_number_ >= 0);
DCHECK(block->rpo_number_ >= 0);
- if (loop_end_ == NULL) return false; // This is not a loop.
+ if (loop_end_ == nullptr) return false; // This is not a loop.
return block->rpo_number_ >= rpo_number_ &&
block->rpo_number_ < loop_end_->rpo_number_;
}
@@ -81,14 +81,35 @@
}
+// static
+BasicBlock* BasicBlock::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
+ while (b1 != b2) {
+ if (b1->dominator_depth() < b2->dominator_depth()) {
+ b2 = b2->dominator();
+ } else {
+ b1 = b1->dominator();
+ }
+ }
+ return b1;
+}
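+// (Example: for a diamond B0 -> {B1, B2} -> B3 where B0 dominates all blocks,
+// GetCommonDominator(B1, B2) repeatedly walks the deeper block up its
+// dominator chain until the walks meet, returning B0.)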
+
+
std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c) {
switch (c) {
case BasicBlock::kNone:
return os << "none";
case BasicBlock::kGoto:
return os << "goto";
+ case BasicBlock::kCall:
+ return os << "call";
case BasicBlock::kBranch:
return os << "branch";
+ case BasicBlock::kSwitch:
+ return os << "switch";
+ case BasicBlock::kDeoptimize:
+ return os << "deoptimize";
+ case BasicBlock::kTailCall:
+ return os << "tailcall";
case BasicBlock::kReturn:
return os << "return";
case BasicBlock::kThrow:
@@ -104,11 +125,6 @@
}
-std::ostream& operator<<(std::ostream& os, const BasicBlock::RpoNumber& rpo) {
- return os << rpo.ToSize();
-}
-
-
Schedule::Schedule(Zone* zone, size_t node_count_hint)
: zone_(zone),
all_blocks_(zone),
@@ -124,14 +140,13 @@
if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) {
return nodeid_to_block_[node->id()];
}
- return NULL;
+ return nullptr;
}
bool Schedule::IsScheduled(Node* node) {
- int length = static_cast<int>(nodeid_to_block_.size());
- if (node->id() >= length) return false;
- return nodeid_to_block_[node->id()] != NULL;
+ if (node->id() >= nodeid_to_block_.size()) return false;
+ return nodeid_to_block_[node->id()] != nullptr;
}
@@ -143,7 +158,7 @@
bool Schedule::SameBasicBlock(Node* a, Node* b) const {
BasicBlock* block = this->block(a);
- return block != NULL && block == this->block(b);
+ return block != nullptr && block == this->block(b);
}
@@ -161,7 +176,7 @@
os << "Planning #" << node->id() << ":" << node->op()->mnemonic()
<< " for future add to B" << block->id() << "\n";
}
- DCHECK(this->block(node) == NULL);
+ DCHECK(this->block(node) == nullptr);
SetBlockForNode(block, node);
}
@@ -172,23 +187,34 @@
os << "Adding #" << node->id() << ":" << node->op()->mnemonic() << " to B"
<< block->id() << "\n";
}
- DCHECK(this->block(node) == NULL || this->block(node) == block);
+ DCHECK(this->block(node) == nullptr || this->block(node) == block);
block->AddNode(node);
SetBlockForNode(block, node);
}
void Schedule::AddGoto(BasicBlock* block, BasicBlock* succ) {
- DCHECK(block->control() == BasicBlock::kNone);
+ DCHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kGoto);
AddSuccessor(block, succ);
}
+void Schedule::AddCall(BasicBlock* block, Node* call, BasicBlock* success_block,
+ BasicBlock* exception_block) {
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ DCHECK_EQ(IrOpcode::kCall, call->opcode());
+ block->set_control(BasicBlock::kCall);
+ AddSuccessor(block, success_block);
+ AddSuccessor(block, exception_block);
+ SetControlInput(block, call);
+}
+
+
void Schedule::AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
BasicBlock* fblock) {
- DCHECK(block->control() == BasicBlock::kNone);
- DCHECK(branch->opcode() == IrOpcode::kBranch);
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
block->set_control(BasicBlock::kBranch);
AddSuccessor(block, tblock);
AddSuccessor(block, fblock);
@@ -196,16 +222,44 @@
}
+void Schedule::AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
+ size_t succ_count) {
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
+ block->set_control(BasicBlock::kSwitch);
+ for (size_t index = 0; index < succ_count; ++index) {
+ AddSuccessor(block, succ_blocks[index]);
+ }
+ SetControlInput(block, sw);
+}
+
+
+void Schedule::AddTailCall(BasicBlock* block, Node* input) {
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ block->set_control(BasicBlock::kTailCall);
+ SetControlInput(block, input);
+ if (block != end()) AddSuccessor(block, end());
+}
+
+
void Schedule::AddReturn(BasicBlock* block, Node* input) {
- DCHECK(block->control() == BasicBlock::kNone);
+ DCHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kReturn);
SetControlInput(block, input);
if (block != end()) AddSuccessor(block, end());
}
+void Schedule::AddDeoptimize(BasicBlock* block, Node* input) {
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ block->set_control(BasicBlock::kDeoptimize);
+ SetControlInput(block, input);
+ if (block != end()) AddSuccessor(block, end());
+}
+
+
void Schedule::AddThrow(BasicBlock* block, Node* input) {
- DCHECK(block->control() == BasicBlock::kNone);
+ DCHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kThrow);
SetControlInput(block, input);
if (block != end()) AddSuccessor(block, end());
@@ -214,20 +268,37 @@
void Schedule::InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
BasicBlock* tblock, BasicBlock* fblock) {
- DCHECK(block->control() != BasicBlock::kNone);
- DCHECK(end->control() == BasicBlock::kNone);
+ DCHECK_NE(BasicBlock::kNone, block->control());
+ DCHECK_EQ(BasicBlock::kNone, end->control());
end->set_control(block->control());
block->set_control(BasicBlock::kBranch);
MoveSuccessors(block, end);
AddSuccessor(block, tblock);
AddSuccessor(block, fblock);
- if (block->control_input() != NULL) {
+ if (block->control_input() != nullptr) {
SetControlInput(end, block->control_input());
}
SetControlInput(block, branch);
}
+void Schedule::InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
+ BasicBlock** succ_blocks, size_t succ_count) {
+ DCHECK_NE(BasicBlock::kNone, block->control());
+ DCHECK_EQ(BasicBlock::kNone, end->control());
+ end->set_control(block->control());
+ block->set_control(BasicBlock::kSwitch);
+ MoveSuccessors(block, end);
+ for (size_t index = 0; index < succ_count; ++index) {
+ AddSuccessor(block, succ_blocks[index]);
+ }
+ if (block->control_input() != nullptr) {
+ SetControlInput(end, block->control_input());
+ }
+ SetControlInput(block, sw);
+}
+
+
void Schedule::AddSuccessor(BasicBlock* block, BasicBlock* succ) {
block->AddSuccessor(succ);
succ->AddPredecessor(block);
@@ -235,13 +306,10 @@
void Schedule::MoveSuccessors(BasicBlock* from, BasicBlock* to) {
- for (BasicBlock::Predecessors::iterator i = from->successors_begin();
- i != from->successors_end(); ++i) {
- BasicBlock* succ = *i;
- to->AddSuccessor(succ);
- for (BasicBlock::Predecessors::iterator j = succ->predecessors_begin();
- j != succ->predecessors_end(); ++j) {
- if (*j == from) *j = to;
+ for (BasicBlock* const successor : from->successors()) {
+ to->AddSuccessor(successor);
+ for (BasicBlock*& predecessor : successor->predecessors()) {
+ if (predecessor == from) predecessor = to;
}
}
from->ClearSuccessors();
@@ -255,8 +323,7 @@
void Schedule::SetBlockForNode(BasicBlock* block, Node* node) {
- int length = static_cast<int>(nodeid_to_block_.size());
- if (node->id() >= length) {
+ if (node->id() >= nodeid_to_block_.size()) {
nodeid_to_block_.resize(node->id() + 1);
}
nodeid_to_block_[node->id()] = block;
@@ -264,51 +331,40 @@
std::ostream& operator<<(std::ostream& os, const Schedule& s) {
- // TODO(svenpanne) Const-correct the RPO stuff/iterators.
- BasicBlockVector* rpo = const_cast<Schedule*>(&s)->rpo_order();
- for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) {
- BasicBlock* block = *i;
- os << "--- BLOCK B" << block->id();
+ for (BasicBlock* block : *s.rpo_order()) {
+ os << "--- BLOCK B" << block->rpo_number();
if (block->deferred()) os << " (deferred)";
if (block->PredecessorCount() != 0) os << " <- ";
bool comma = false;
- for (BasicBlock::Predecessors::iterator j = block->predecessors_begin();
- j != block->predecessors_end(); ++j) {
+ for (BasicBlock const* predecessor : block->predecessors()) {
if (comma) os << ", ";
comma = true;
- os << "B" << (*j)->id();
+ os << "B" << predecessor->rpo_number();
}
os << " ---\n";
- for (BasicBlock::const_iterator j = block->begin(); j != block->end();
- ++j) {
- Node* node = *j;
+ for (Node* node : *block) {
os << " " << *node;
if (NodeProperties::IsTyped(node)) {
- Bounds bounds = NodeProperties::GetBounds(node);
+ Type* type = NodeProperties::GetType(node);
os << " : ";
- bounds.lower->PrintTo(os);
- if (!bounds.upper->Is(bounds.lower)) {
- os << "..";
- bounds.upper->PrintTo(os);
- }
+ type->PrintTo(os);
}
os << "\n";
}
BasicBlock::Control control = block->control();
if (control != BasicBlock::kNone) {
os << " ";
- if (block->control_input() != NULL) {
+ if (block->control_input() != nullptr) {
os << *block->control_input();
} else {
os << "Goto";
}
os << " -> ";
comma = false;
- for (BasicBlock::Successors::iterator j = block->successors_begin();
- j != block->successors_end(); ++j) {
+ for (BasicBlock const* successor : block->successors()) {
if (comma) os << ", ";
comma = true;
- os << "B" << (*j)->id();
+ os << "B" << successor->rpo_number();
}
os << "\n";
}
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
index 0bba689..9624ff5 100644
--- a/src/compiler/schedule.h
+++ b/src/compiler/schedule.h
@@ -6,36 +6,40 @@
#define V8_COMPILER_SCHEDULE_H_
#include <iosfwd>
-#include <vector>
-#include "src/v8.h"
-
-#include "src/compiler/node.h"
-#include "src/compiler/opcodes.h"
-#include "src/zone.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
class BasicBlock;
class BasicBlockInstrumentor;
-class Graph;
-class ConstructScheduleData;
-class CodeGenerator; // Because of a namespace bug in clang.
+class Node;
+
+
+typedef ZoneVector<BasicBlock*> BasicBlockVector;
+typedef ZoneVector<Node*> NodeVector;
+
// A basic block contains an ordered list of nodes and ends with a control
// node. Note that if a basic block has phis, then all phis must appear as the
// first nodes in the block.
-class BasicBlock FINAL : public ZoneObject {
+class BasicBlock final : public ZoneObject {
public:
// Possible control nodes that can end a block.
enum Control {
- kNone, // Control not initialized yet.
- kGoto, // Goto a single successor block.
- kBranch, // Branch if true to first successor, otherwise second.
- kReturn, // Return a value from this method.
- kThrow // Throw an exception.
+ kNone, // Control not initialized yet.
+ kGoto, // Goto a single successor block.
+ kCall, // Call with continuation as first successor, exception
+ // second.
+ kBranch, // Branch if true to first successor, otherwise second.
+ kSwitch, // Table dispatch to one of the successor blocks.
+    kDeoptimize,  // Deoptimize from this method.
+ kTailCall, // Tail call another method from this method.
+ kReturn, // Return a value from this method.
+ kThrow // Throw an exception.
};
class Id {
@@ -50,72 +54,36 @@
size_t index_;
};
- static const int kInvalidRpoNumber = -1;
- class RpoNumber FINAL {
- public:
- int ToInt() const {
- DCHECK(IsValid());
- return index_;
- }
- size_t ToSize() const {
- DCHECK(IsValid());
- return static_cast<size_t>(index_);
- }
- bool IsValid() const { return index_ >= 0; }
- static RpoNumber FromInt(int index) { return RpoNumber(index); }
- static RpoNumber Invalid() { return RpoNumber(kInvalidRpoNumber); }
-
- bool IsNext(const RpoNumber other) const {
- DCHECK(IsValid());
- return other.index_ == this->index_ + 1;
- }
-
- bool operator==(RpoNumber other) const {
- return this->index_ == other.index_;
- }
-
- private:
- explicit RpoNumber(int32_t index) : index_(index) {}
- int32_t index_;
- };
-
BasicBlock(Zone* zone, Id id);
Id id() const { return id_; }
- // Predecessors and successors.
- typedef ZoneVector<BasicBlock*> Predecessors;
- Predecessors::iterator predecessors_begin() { return predecessors_.begin(); }
- Predecessors::iterator predecessors_end() { return predecessors_.end(); }
- Predecessors::const_iterator predecessors_begin() const {
- return predecessors_.begin();
- }
- Predecessors::const_iterator predecessors_end() const {
- return predecessors_.end();
- }
+ // Predecessors.
+ BasicBlockVector& predecessors() { return predecessors_; }
+ const BasicBlockVector& predecessors() const { return predecessors_; }
size_t PredecessorCount() const { return predecessors_.size(); }
BasicBlock* PredecessorAt(size_t index) { return predecessors_[index]; }
void ClearPredecessors() { predecessors_.clear(); }
void AddPredecessor(BasicBlock* predecessor);
- typedef ZoneVector<BasicBlock*> Successors;
- Successors::iterator successors_begin() { return successors_.begin(); }
- Successors::iterator successors_end() { return successors_.end(); }
- Successors::const_iterator successors_begin() const {
- return successors_.begin();
- }
- Successors::const_iterator successors_end() const {
- return successors_.end();
- }
+ // Successors.
+ BasicBlockVector& successors() { return successors_; }
+ const BasicBlockVector& successors() const { return successors_; }
size_t SuccessorCount() const { return successors_.size(); }
BasicBlock* SuccessorAt(size_t index) { return successors_[index]; }
void ClearSuccessors() { successors_.clear(); }
void AddSuccessor(BasicBlock* successor);
// Nodes in the basic block.
+ typedef Node* value_type;
+ bool empty() const { return nodes_.empty(); }
+ size_t size() const { return nodes_.size(); }
Node* NodeAt(size_t index) { return nodes_[index]; }
size_t NodeCount() const { return nodes_.size(); }
+ value_type& front() { return nodes_.front(); }
+ value_type const& front() const { return nodes_.front(); }
+
typedef NodeVector::iterator iterator;
iterator begin() { return nodes_.begin(); }
iterator end() { return nodes_.end(); }
@@ -166,14 +134,17 @@
int32_t loop_number() const { return loop_number_; }
void set_loop_number(int32_t loop_number) { loop_number_ = loop_number; }
- RpoNumber GetRpoNumber() const { return RpoNumber::FromInt(rpo_number_); }
int32_t rpo_number() const { return rpo_number_; }
void set_rpo_number(int32_t rpo_number);
// Loop membership helpers.
- inline bool IsLoopHeader() const { return loop_end_ != NULL; }
+ inline bool IsLoopHeader() const { return loop_end_ != nullptr; }
bool LoopContains(BasicBlock* block) const;
+  // Computes the nearest common dominator of {b1} and {b2}. The worst-case
+  // time complexity is O(N), where N is the height of the dominator tree.
+ static BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
+
private:
int32_t loop_number_; // loop number of the block.
int32_t rpo_number_; // special RPO number of the block.
@@ -182,8 +153,8 @@
BasicBlock* dominator_; // Immediate dominator of the block.
BasicBlock* rpo_next_; // Link to next block in special RPO order.
BasicBlock* loop_header_; // Pointer to dominating loop header basic block,
- // NULL if none. For loop headers, this points to
- // enclosing loop header.
+ // nullptr if none. For loop headers, this points to
+ // enclosing loop header.
BasicBlock* loop_end_; // end of the loop, if this block is a loop header.
int32_t loop_depth_; // loop nesting, 0 is top-level
@@ -191,26 +162,22 @@
Node* control_input_; // Input value for control.
NodeVector nodes_; // nodes of this block in forward order.
- Successors successors_;
- Predecessors predecessors_;
+ BasicBlockVector successors_;
+ BasicBlockVector predecessors_;
Id id_;
DISALLOW_COPY_AND_ASSIGN(BasicBlock);
};
-std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c);
-std::ostream& operator<<(std::ostream& os, const BasicBlock::Id& id);
-std::ostream& operator<<(std::ostream& os, const BasicBlock::RpoNumber& rpo);
+std::ostream& operator<<(std::ostream&, const BasicBlock::Control&);
+std::ostream& operator<<(std::ostream&, const BasicBlock::Id&);
-typedef ZoneVector<BasicBlock*> BasicBlockVector;
-typedef BasicBlockVector::iterator BasicBlockVectorIter;
-typedef BasicBlockVector::reverse_iterator BasicBlockVectorRIter;
// A schedule represents the result of assigning nodes to basic blocks
// and ordering them within basic blocks. Prior to computing a schedule,
// a graph has no notion of control flow ordering other than that induced
// by the graph's dependencies. A schedule is required to generate code.
-class Schedule FINAL : public ZoneObject {
+class Schedule final : public ZoneObject {
public:
explicit Schedule(Zone* zone, size_t node_count_hint = 0);
@@ -239,10 +206,24 @@
// BasicBlock building: add a goto to the end of {block}.
void AddGoto(BasicBlock* block, BasicBlock* succ);
+ // BasicBlock building: add a call at the end of {block}.
+ void AddCall(BasicBlock* block, Node* call, BasicBlock* success_block,
+ BasicBlock* exception_block);
+
// BasicBlock building: add a branch at the end of {block}.
void AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
BasicBlock* fblock);
+ // BasicBlock building: add a switch at the end of {block}.
+ void AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
+ size_t succ_count);
+
+ // BasicBlock building: add a deoptimize at the end of {block}.
+ void AddDeoptimize(BasicBlock* block, Node* input);
+
+ // BasicBlock building: add a tailcall at the end of {block}.
+ void AddTailCall(BasicBlock* block, Node* input);
+
// BasicBlock building: add a return at the end of {block}.
void AddReturn(BasicBlock* block, Node* input);
@@ -253,6 +234,10 @@
void InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
BasicBlock* tblock, BasicBlock* fblock);
+ // BasicBlock mutation: insert a switch into the end of {block}.
+ void InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
+ BasicBlock** succ_blocks, size_t succ_count);
+
// Exposed publicly for testing only.
void AddSuccessorForTesting(BasicBlock* block, BasicBlock* succ) {
return AddSuccessor(block, succ);
@@ -286,7 +271,7 @@
DISALLOW_COPY_AND_ASSIGN(Schedule);
};
-std::ostream& operator<<(std::ostream& os, const Schedule& s);
+std::ostream& operator<<(std::ostream&, const Schedule&);
} // namespace compiler
} // namespace internal
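
The GetCommonDominator helper declared above replaces the version that used to
live on the Scheduler; its old body is deleted from scheduler.cc further down
in this patch. A sketch of the walk it performs, mirroring that removed
implementation:

  // Walk the deeper of the two blocks up the dominator tree until the two
  // chains meet; the step count is bounded by the tree height, hence O(N).
  BasicBlock* BasicBlock::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
    while (b1 != b2) {
      if (b1->dominator_depth() < b2->dominator_depth()) {
        b2 = b2->dominator();
      } else {
        b1 = b1->dominator();
      }
    }
    return b1;
  }
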
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index f12c631..80ce8b1 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -2,47 +2,44 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <deque>
-#include <queue>
-
#include "src/compiler/scheduler.h"
+#include <iomanip>
+
+#include "src/base/adapters.h"
#include "src/bit-vector.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/node.h"
+#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
-static inline void Trace(const char* msg, ...) {
- if (FLAG_trace_turbo_scheduler) {
- va_list arguments;
- va_start(arguments, msg);
- base::OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
+ } while (false)
-
-Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags)
: zone_(zone),
graph_(graph),
schedule_(schedule),
+ flags_(flags),
scheduled_nodes_(zone),
schedule_root_nodes_(zone),
schedule_queue_(zone),
node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}
-Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph) {
+Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
Schedule* schedule = new (graph->zone())
Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
- Scheduler scheduler(zone, graph, schedule);
+ Scheduler scheduler(zone, graph, schedule, flags);
scheduler.BuildCFG();
scheduler.ComputeSpecialRPONumbering();
@@ -65,7 +62,6 @@
Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
- DCHECK(node->id() < static_cast<int>(node_data_.size()));
return &node_data_[node->id()];
}
@@ -75,7 +71,8 @@
if (data->placement_ == kUnknown) { // Compute placement, once, on demand.
switch (node->opcode()) {
case IrOpcode::kParameter:
- // Parameters are always fixed to the start node.
+ case IrOpcode::kOsrValue:
+ // Parameters and OSR values are always fixed to the start block.
data->placement_ = kFixed;
break;
case IrOpcode::kPhi:
@@ -126,11 +123,10 @@
#undef DEFINE_CONTROL_CASE
{
// Control nodes force coupled uses to be placed.
- Node::Uses uses = node->uses();
- for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
- if (GetPlacement(*i) == Scheduler::kCoupled) {
- DCHECK_EQ(node, NodeProperties::GetControlInput(*i));
- UpdatePlacement(*i, placement);
+ for (auto use : node->uses()) {
+ if (GetPlacement(use) == Scheduler::kCoupled) {
+ DCHECK_EQ(node, NodeProperties::GetControlInput(use));
+ UpdatePlacement(use, placement);
}
}
break;
@@ -173,7 +169,7 @@
++(GetData(node)->unscheduled_count_);
if (FLAG_trace_turbo_scheduler) {
- Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
+ TRACE(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
GetData(node)->unscheduled_count_);
}
@@ -197,31 +193,17 @@
DCHECK(GetData(node)->unscheduled_count_ > 0);
--(GetData(node)->unscheduled_count_);
if (FLAG_trace_turbo_scheduler) {
- Trace(" Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
+ TRACE(" Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
GetData(node)->unscheduled_count_);
}
if (GetData(node)->unscheduled_count_ == 0) {
- Trace(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
+ TRACE(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
schedule_queue_.push(node);
}
}
-BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
- while (b1 != b2) {
- int32_t b1_depth = b1->dominator_depth();
- int32_t b2_depth = b2->dominator_depth();
- if (b1_depth < b2_depth) {
- b2 = b2->dominator();
- } else {
- b1 = b1->dominator();
- }
- }
- return b1;
-}
-
-
// -----------------------------------------------------------------------------
// Phase 1: Build control-flow graph.
@@ -233,14 +215,15 @@
class CFGBuilder : public ZoneObject {
public:
CFGBuilder(Zone* zone, Scheduler* scheduler)
- : scheduler_(scheduler),
+ : zone_(zone),
+ scheduler_(scheduler),
schedule_(scheduler->schedule_),
queued_(scheduler->graph_, 2),
queue_(zone),
control_(zone),
- component_entry_(NULL),
- component_start_(NULL),
- component_end_(NULL) {}
+ component_entry_(nullptr),
+ component_start_(nullptr),
+ component_end_(nullptr) {}
// Run the control flow graph construction algorithm by walking the graph
// backwards from end through control edges, building and connecting the
@@ -270,7 +253,7 @@
ResetDataStructures();
Queue(exit);
- component_entry_ = NULL;
+ component_entry_ = nullptr;
component_start_ = block;
component_end_ = schedule_->block(exit);
scheduler_->equivalence_->Run(exit);
@@ -281,8 +264,8 @@
// Use control dependence equivalence to find a canonical single-entry
// single-exit region that makes up a minimal component to be scheduled.
if (IsSingleEntrySingleExitRegion(node, exit)) {
- Trace("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
- DCHECK_EQ(NULL, component_entry_);
+ TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
+ DCHECK(!component_entry_);
component_entry_ = node;
continue;
}
@@ -292,7 +275,7 @@
Queue(node->InputAt(i));
}
}
- DCHECK_NE(NULL, component_entry_);
+ DCHECK(component_entry_);
for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
ConnectBlocks(*i); // Connect block to its predecessor/successors.
@@ -300,7 +283,7 @@
}
private:
- // TODO(mstarzinger): Only for Scheduler::FuseFloatingControl.
+ friend class ScheduleLateNodeVisitor;
friend class Scheduler;
void FixNode(BasicBlock* block, Node* node) {
@@ -338,7 +321,13 @@
break;
}
case IrOpcode::kBranch:
- BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
+ case IrOpcode::kSwitch:
+ BuildBlocksForSuccessors(node);
+ break;
+ case IrOpcode::kCall:
+ if (NodeProperties::IsExceptionalCall(node)) {
+ BuildBlocksForSuccessors(node);
+ }
break;
default:
break;
@@ -355,10 +344,32 @@
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectBranch(node);
break;
+ case IrOpcode::kSwitch:
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
+ ConnectSwitch(node);
+ break;
+ case IrOpcode::kDeoptimize:
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
+ ConnectDeoptimize(node);
+ break;
+ case IrOpcode::kTailCall:
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
+ ConnectTailCall(node);
+ break;
case IrOpcode::kReturn:
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectReturn(node);
break;
+ case IrOpcode::kThrow:
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
+ ConnectThrow(node);
+ break;
+ case IrOpcode::kCall:
+ if (NodeProperties::IsExceptionalCall(node)) {
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
+ ConnectCall(node);
+ }
+ break;
default:
break;
}
@@ -366,58 +377,62 @@
BasicBlock* BuildBlockForNode(Node* node) {
BasicBlock* block = schedule_->block(node);
- if (block == NULL) {
+ if (block == nullptr) {
block = schedule_->NewBasicBlock();
- Trace("Create block B%d for #%d:%s\n", block->id().ToInt(), node->id(),
+ TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
node->op()->mnemonic());
FixNode(block, node);
}
return block;
}
- void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
- IrOpcode::Value b) {
- Node* successors[2];
- CollectSuccessorProjections(node, successors, a, b);
- BuildBlockForNode(successors[0]);
- BuildBlockForNode(successors[1]);
- }
-
- // Collect the branch-related projections from a node, such as IfTrue,
- // IfFalse.
- // TODO(titzer): consider moving this to node.h
- void CollectSuccessorProjections(Node* node, Node** buffer,
- IrOpcode::Value true_opcode,
- IrOpcode::Value false_opcode) {
- buffer[0] = NULL;
- buffer[1] = NULL;
- for (Node* use : node->uses()) {
- if (use->opcode() == true_opcode) {
- DCHECK_EQ(NULL, buffer[0]);
- buffer[0] = use;
- }
- if (use->opcode() == false_opcode) {
- DCHECK_EQ(NULL, buffer[1]);
- buffer[1] = use;
- }
+ void BuildBlocksForSuccessors(Node* node) {
+ size_t const successor_cnt = node->op()->ControlOutputCount();
+ Node** successors = zone_->NewArray<Node*>(successor_cnt);
+ NodeProperties::CollectControlProjections(node, successors, successor_cnt);
+ for (size_t index = 0; index < successor_cnt; ++index) {
+ BuildBlockForNode(successors[index]);
}
- DCHECK_NE(NULL, buffer[0]);
- DCHECK_NE(NULL, buffer[1]);
}
- void CollectSuccessorBlocks(Node* node, BasicBlock** buffer,
- IrOpcode::Value true_opcode,
- IrOpcode::Value false_opcode) {
- Node* successors[2];
- CollectSuccessorProjections(node, successors, true_opcode, false_opcode);
- buffer[0] = schedule_->block(successors[0]);
- buffer[1] = schedule_->block(successors[1]);
+ void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
+ size_t successor_cnt) {
+ Node** successors = reinterpret_cast<Node**>(successor_blocks);
+ NodeProperties::CollectControlProjections(node, successors, successor_cnt);
+ for (size_t index = 0; index < successor_cnt; ++index) {
+ successor_blocks[index] = schedule_->block(successors[index]);
+ }
+ }
+
+ BasicBlock* FindPredecessorBlock(Node* node) {
+ BasicBlock* predecessor_block = nullptr;
+ while (true) {
+ predecessor_block = schedule_->block(node);
+ if (predecessor_block != nullptr) break;
+ node = NodeProperties::GetControlInput(node);
+ }
+ return predecessor_block;
+ }
+
+ void ConnectCall(Node* call) {
+ BasicBlock* successor_blocks[2];
+ CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));
+
+ // Consider the exception continuation to be deferred.
+ successor_blocks[1]->set_deferred(true);
+
+ Node* call_control = NodeProperties::GetControlInput(call);
+ BasicBlock* call_block = FindPredecessorBlock(call_control);
+ TraceConnect(call, call_block, successor_blocks[0]);
+ TraceConnect(call, call_block, successor_blocks[1]);
+ schedule_->AddCall(call_block, call, successor_blocks[0],
+ successor_blocks[1]);
}
void ConnectBranch(Node* branch) {
BasicBlock* successor_blocks[2];
- CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
- IrOpcode::kIfFalse);
+ CollectSuccessorBlocks(branch, successor_blocks,
+ arraysize(successor_blocks));
// Consider branch hints.
switch (BranchHintOf(branch->op())) {
@@ -437,10 +452,8 @@
schedule_->InsertBranch(component_start_, component_end_, branch,
successor_blocks[0], successor_blocks[1]);
} else {
- Node* branch_block_node = NodeProperties::GetControlInput(branch);
- BasicBlock* branch_block = schedule_->block(branch_block_node);
- DCHECK(branch_block != NULL);
-
+ Node* branch_control = NodeProperties::GetControlInput(branch);
+ BasicBlock* branch_block = FindPredecessorBlock(branch_control);
TraceConnect(branch, branch_block, successor_blocks[0]);
TraceConnect(branch, branch_block, successor_blocks[1]);
schedule_->AddBranch(branch_block, branch, successor_blocks[0],
@@ -448,36 +461,79 @@
}
}
+ void ConnectSwitch(Node* sw) {
+ size_t const successor_count = sw->op()->ControlOutputCount();
+ BasicBlock** successor_blocks =
+ zone_->NewArray<BasicBlock*>(successor_count);
+ CollectSuccessorBlocks(sw, successor_blocks, successor_count);
+
+ if (sw == component_entry_) {
+ for (size_t index = 0; index < successor_count; ++index) {
+ TraceConnect(sw, component_start_, successor_blocks[index]);
+ }
+ schedule_->InsertSwitch(component_start_, component_end_, sw,
+ successor_blocks, successor_count);
+ } else {
+ Node* switch_control = NodeProperties::GetControlInput(sw);
+ BasicBlock* switch_block = FindPredecessorBlock(switch_control);
+ for (size_t index = 0; index < successor_count; ++index) {
+ TraceConnect(sw, switch_block, successor_blocks[index]);
+ }
+ schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
+ }
+ }
+
void ConnectMerge(Node* merge) {
// Don't connect the special merge at the end to its predecessors.
if (IsFinalMerge(merge)) return;
BasicBlock* block = schedule_->block(merge);
- DCHECK(block != NULL);
+ DCHECK_NOT_NULL(block);
    // For each of the merge's control inputs, add a goto at the end of the
    // input's predecessor block that jumps to the merge's basic block.
for (Node* const input : merge->inputs()) {
- BasicBlock* predecessor_block = schedule_->block(input);
+ BasicBlock* predecessor_block = FindPredecessorBlock(input);
TraceConnect(merge, predecessor_block, block);
schedule_->AddGoto(predecessor_block, block);
}
}
+ void ConnectTailCall(Node* call) {
+ Node* call_control = NodeProperties::GetControlInput(call);
+ BasicBlock* call_block = FindPredecessorBlock(call_control);
+ TraceConnect(call, call_block, nullptr);
+ schedule_->AddTailCall(call_block, call);
+ }
+
void ConnectReturn(Node* ret) {
- Node* return_block_node = NodeProperties::GetControlInput(ret);
- BasicBlock* return_block = schedule_->block(return_block_node);
- TraceConnect(ret, return_block, NULL);
+ Node* return_control = NodeProperties::GetControlInput(ret);
+ BasicBlock* return_block = FindPredecessorBlock(return_control);
+ TraceConnect(ret, return_block, nullptr);
schedule_->AddReturn(return_block, ret);
}
+ void ConnectDeoptimize(Node* deopt) {
+ Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
+ BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
+ TraceConnect(deopt, deoptimize_block, nullptr);
+ schedule_->AddDeoptimize(deoptimize_block, deopt);
+ }
+
+ void ConnectThrow(Node* thr) {
+ Node* throw_control = NodeProperties::GetControlInput(thr);
+ BasicBlock* throw_block = FindPredecessorBlock(throw_control);
+ TraceConnect(thr, throw_block, nullptr);
+ schedule_->AddThrow(throw_block, thr);
+ }
+
void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
- DCHECK_NE(NULL, block);
- if (succ == NULL) {
- Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
- block->id().ToInt());
+ DCHECK_NOT_NULL(block);
+ if (succ == nullptr) {
+ TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
+ node->op()->mnemonic(), block->id().ToInt());
} else {
- Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
- block->id().ToInt(), succ->id().ToInt());
+ TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
+ node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
}
}
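
With tracing enabled (FLAG_trace_turbo_scheduler), the helper above now emits
the new id: prefix instead of the old B prefix. The ids below are made up, but
the line shape follows the format strings directly:

  Connect #15:Branch, id:2 -> id:3
  Connect #15:Branch, id:2 -> id:4
  Connect #21:Return, id:5 -> end
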
@@ -498,6 +554,7 @@
DCHECK(control_.empty());
}
+ Zone* zone_;
Scheduler* scheduler_;
Schedule* schedule_;
NodeMarker<bool> queued_; // Mark indicating whether node is queued.
@@ -510,7 +567,7 @@
void Scheduler::BuildCFG() {
- Trace("--- CREATING CFG -------------------------------------------\n");
+ TRACE("--- CREATING CFG -------------------------------------------\n");
// Instantiate a new control equivalence algorithm for the graph.
equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);
@@ -545,18 +602,19 @@
SpecialRPONumberer(Zone* zone, Schedule* schedule)
: zone_(zone),
schedule_(schedule),
- order_(NULL),
- beyond_end_(NULL),
+ order_(nullptr),
+ beyond_end_(nullptr),
loops_(zone),
backedges_(zone),
stack_(zone),
- previous_block_count_(0) {}
+ previous_block_count_(0),
+ empty_(0, zone) {}
// Computes the special reverse-post-order for the main control flow graph,
// that is for the graph spanned between the schedule's start and end blocks.
void ComputeSpecialRPO() {
DCHECK(schedule_->end()->SuccessorCount() == 0);
- DCHECK_EQ(NULL, order_); // Main order does not exist yet.
+ DCHECK(!order_); // Main order does not exist yet.
ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
}
@@ -564,7 +622,7 @@
// that is for the graph spanned between the given {entry} and {end} blocks,
// then updates the existing ordering with this new information.
void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
- DCHECK_NE(NULL, order_); // Main order to be updated is present.
+ DCHECK(order_); // Main order to be updated is present.
ComputeAndInsertSpecialRPO(entry, end);
}
@@ -572,7 +630,7 @@
// numbering for basic blocks into the final schedule.
void SerializeRPOIntoSchedule() {
int32_t number = 0;
- for (BasicBlock* b = order_; b != NULL; b = b->rpo_next()) {
+ for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
b->set_rpo_number(number++);
schedule_->rpo_order()->push_back(b);
}
@@ -587,6 +645,14 @@
#endif
}
+ const ZoneVector<BasicBlock*>& GetOutgoingBlocks(BasicBlock* block) {
+ if (HasLoopNumber(block)) {
+ LoopInfo const& loop = loops_[GetLoopNumber(block)];
+ if (loop.outgoing) return *loop.outgoing;
+ }
+ return empty_;
+ }
+
private:
typedef std::pair<BasicBlock*, size_t> Backedge;
@@ -604,17 +670,18 @@
struct LoopInfo {
BasicBlock* header;
- ZoneList<BasicBlock*>* outgoing;
+ ZoneVector<BasicBlock*>* outgoing;
BitVector* members;
LoopInfo* prev;
BasicBlock* end;
BasicBlock* start;
void AddOutgoing(Zone* zone, BasicBlock* block) {
- if (outgoing == NULL) {
- outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
+ if (outgoing == nullptr) {
+ outgoing = new (zone->New(sizeof(ZoneVector<BasicBlock*>)))
+ ZoneVector<BasicBlock*>(zone);
}
- outgoing->Add(block, zone);
+ outgoing->push_back(block);
}
};
@@ -646,7 +713,7 @@
// use the schedule's end block in actual control flow (e.g. with end having
// successors). Once this has been cleaned up we can use the end block here.
BasicBlock* BeyondEndSentinel() {
- if (beyond_end_ == NULL) {
+ if (beyond_end_ == nullptr) {
BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
}
@@ -710,7 +777,7 @@
// Initialize the "loop stack". Note the entry could be a loop header.
LoopInfo* loop =
- HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
+ HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
order = insertion_point;
// Perform an iterative post-order traversal, visiting loop bodies before
@@ -721,7 +788,7 @@
while (stack_depth > 0) {
SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
BasicBlock* block = frame->block;
- BasicBlock* succ = NULL;
+ BasicBlock* succ = nullptr;
if (block != end && frame->index < block->SuccessorCount()) {
// Process the next normal successor.
@@ -731,7 +798,7 @@
if (block->rpo_number() == kBlockOnStack) {
// Finish the loop body the first time the header is left on the
// stack.
- DCHECK(loop != NULL && loop->header == block);
+ DCHECK(loop != nullptr && loop->header == block);
loop->start = PushFront(order, block);
order = loop->end;
block->set_rpo_number(kBlockVisited2);
@@ -743,23 +810,22 @@
}
// Use the next outgoing edge if there are any.
- int outgoing_index =
- static_cast<int>(frame->index - block->SuccessorCount());
+ size_t outgoing_index = frame->index - block->SuccessorCount();
LoopInfo* info = &loops_[GetLoopNumber(block)];
DCHECK(loop != info);
- if (block != entry && info->outgoing != NULL &&
- outgoing_index < info->outgoing->length()) {
+ if (block != entry && info->outgoing != nullptr &&
+ outgoing_index < info->outgoing->size()) {
succ = info->outgoing->at(outgoing_index);
frame->index++;
}
}
- if (succ != NULL) {
+ if (succ != nullptr) {
// Process the next successor.
if (succ->rpo_number() == kBlockOnStack) continue;
if (succ->rpo_number() == kBlockVisited2) continue;
DCHECK(succ->rpo_number() == kBlockUnvisited2);
- if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) {
+ if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
// The successor is not in the current loop or any nested loop.
// Add it to the outgoing edges of this loop and visit it later.
loop->AddOutgoing(zone_, succ);
@@ -799,10 +865,10 @@
}
// Publish new order the first time.
- if (order_ == NULL) order_ = order;
+ if (order_ == nullptr) order_ = order;
// Compute the correct loop headers and set the correct loop ends.
- LoopInfo* current_loop = NULL;
+ LoopInfo* current_loop = nullptr;
BasicBlock* current_header = entry->loop_header();
int32_t loop_depth = entry->loop_depth();
if (entry->IsLoopHeader()) --loop_depth; // Entry might be a loop header.
@@ -813,11 +879,13 @@
current->set_rpo_number(kBlockUnvisited1);
// Finish the previous loop(s) if we just exited them.
- while (current_header != NULL && current == current_header->loop_end()) {
+ while (current_header != nullptr &&
+ current == current_header->loop_end()) {
DCHECK(current_header->IsLoopHeader());
- DCHECK(current_loop != NULL);
+ DCHECK_NOT_NULL(current_loop);
current_loop = current_loop->prev;
- current_header = current_loop == NULL ? NULL : current_loop->header;
+ current_header =
+ current_loop == nullptr ? nullptr : current_loop->header;
--loop_depth;
}
current->set_loop_header(current_header);
@@ -827,20 +895,21 @@
++loop_depth;
current_loop = &loops_[GetLoopNumber(current)];
BasicBlock* end = current_loop->end;
- current->set_loop_end(end == NULL ? BeyondEndSentinel() : end);
+ current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
current_header = current_loop->header;
- Trace("B%d is a loop header, increment loop depth to %d\n",
+ TRACE("id:%d is a loop header, increment loop depth to %d\n",
current->id().ToInt(), loop_depth);
}
current->set_loop_depth(loop_depth);
- if (current->loop_header() == NULL) {
- Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(),
+ if (current->loop_header() == nullptr) {
+ TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
current->loop_depth());
} else {
- Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(),
- current->loop_header()->id().ToInt(), current->loop_depth());
+ TRACE("id:%d has loop header id:%d, (depth == %d)\n",
+ current->id().ToInt(), current->loop_header()->id().ToInt(),
+ current->loop_depth());
}
}
}
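
For intuition: special RPO keeps every loop body contiguous, which is why a
loop can be summarized by a half-open range of RPO numbers (the "range:
[Bx, By)" that PrintRPO emits below). A hypothetical four-block graph:

  //  B0 -> B1 -> B2 -> B4    B1 is the loop header and B2 the backedge
  //        ^     |           block. Special RPO emits B0 B1 B2 B4, the loop
  //        +-----+           occupies the range [1, 3), and the loop exit
  //                          B4 follows the body immediately.
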
@@ -865,7 +934,7 @@
BasicBlock* member = backedges->at(i).first;
BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
size_t loop_num = GetLoopNumber(header);
- if (loops_[loop_num].header == NULL) {
+ if (loops_[loop_num].header == nullptr) {
loops_[loop_num].header = header;
loops_[loop_num].members = new (zone_)
BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
@@ -906,30 +975,28 @@
os << " (";
for (size_t i = 0; i < loops_.size(); i++) {
if (i > 0) os << " ";
- os << "B" << loops_[i].header->id();
+ os << "id:" << loops_[i].header->id();
}
os << ")";
}
os << ":\n";
- for (BasicBlock* block = order_; block != NULL; block = block->rpo_next()) {
- BasicBlock::Id bid = block->id();
- // TODO(jarin,svenpanne): Add formatting here once we have support for
- // that in streams (we want an equivalent of PrintF("%5d:", x) here).
- os << " " << block->rpo_number() << ":";
+ for (BasicBlock* block = order_; block != nullptr;
+ block = block->rpo_next()) {
+ os << std::setw(5) << "B" << block->rpo_number() << ":";
for (size_t i = 0; i < loops_.size(); i++) {
bool range = loops_[i].header->LoopContains(block);
bool membership = loops_[i].header != block && range;
os << (membership ? " |" : " ");
os << (range ? "x" : " ");
}
- os << " B" << bid << ": ";
- if (block->loop_end() != NULL) {
- os << " range: [" << block->rpo_number() << ", "
+ os << " id:" << block->id() << ": ";
+ if (block->loop_end() != nullptr) {
+ os << " range: [B" << block->rpo_number() << ", B"
<< block->loop_end()->rpo_number() << ")";
}
- if (block->loop_header() != NULL) {
- os << " header: B" << block->loop_header()->id();
+ if (block->loop_header() != nullptr) {
+ os << " header: id:" << block->loop_header()->id();
}
if (block->loop_depth() > 0) {
os << " depth: " << block->loop_depth();
@@ -948,10 +1015,10 @@
BasicBlock* header = loop->header;
BasicBlock* end = header->loop_end();
- DCHECK(header != NULL);
+ DCHECK_NOT_NULL(header);
DCHECK(header->rpo_number() >= 0);
DCHECK(header->rpo_number() < static_cast<int>(order->size()));
- DCHECK(end != NULL);
+ DCHECK_NOT_NULL(end);
DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
DCHECK(end->rpo_number() > header->rpo_number());
DCHECK(header->loop_header() != header);
@@ -962,7 +1029,7 @@
DCHECK_EQ(header, block);
bool end_found;
while (true) {
- if (block == NULL || block == loop->end) {
+ if (block == nullptr || block == loop->end) {
end_found = (loop->end == block);
break;
}
@@ -970,7 +1037,7 @@
DCHECK(block->rpo_number() == links + header->rpo_number());
links++;
block = block->rpo_next();
- DCHECK(links < static_cast<int>(2 * order->size())); // cycle?
+ DCHECK_LT(links, static_cast<int>(2 * order->size())); // cycle?
}
DCHECK(links > 0);
DCHECK(links == end->rpo_number() - header->rpo_number());
@@ -978,7 +1045,7 @@
// Check loop depth of the header.
int loop_depth = 0;
- for (LoopInfo* outer = loop; outer != NULL; outer = outer->prev) {
+ for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
loop_depth++;
}
DCHECK_EQ(loop_depth, header->loop_depth());
@@ -1009,6 +1076,7 @@
ZoneVector<Backedge> backedges_;
ZoneVector<SpecialRPOStackFrame> stack_;
size_t previous_block_count_;
+ ZoneVector<BasicBlock*> const empty_;
};
@@ -1022,7 +1090,7 @@
void Scheduler::ComputeSpecialRPONumbering() {
- Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n");
+ TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");
// Compute the special reverse-post-order for basic blocks.
special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
@@ -1031,31 +1099,32 @@
void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
- for (/*nop*/; block != NULL; block = block->rpo_next()) {
- BasicBlock::Predecessors::iterator pred = block->predecessors_begin();
- BasicBlock::Predecessors::iterator end = block->predecessors_end();
+ for (/*nop*/; block != nullptr; block = block->rpo_next()) {
+ auto pred = block->predecessors().begin();
+ auto end = block->predecessors().end();
DCHECK(pred != end); // All blocks except start have predecessors.
BasicBlock* dominator = *pred;
+ bool deferred = dominator->deferred();
// For multiple predecessors, walk up the dominator tree until a common
// dominator is found. Visitation order guarantees that all predecessors
// except for backwards edges have been visited.
for (++pred; pred != end; ++pred) {
// Don't examine backwards edges.
if ((*pred)->dominator_depth() < 0) continue;
- dominator = GetCommonDominator(dominator, *pred);
+ dominator = BasicBlock::GetCommonDominator(dominator, *pred);
+ deferred = deferred & (*pred)->deferred();
}
block->set_dominator(dominator);
block->set_dominator_depth(dominator->dominator_depth() + 1);
- // Propagate "deferredness" of the dominator.
- if (dominator->deferred()) block->set_deferred(true);
- Trace("Block B%d's idom is B%d, depth = %d\n", block->id().ToInt(),
+ block->set_deferred(deferred | block->deferred());
+ TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
dominator->id().ToInt(), block->dominator_depth());
}
}
void Scheduler::GenerateImmediateDominatorTree() {
- Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
+ TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
// Seed start block to be the first dominator.
schedule_->start()->set_dominator_depth(0);
@@ -1069,7 +1138,7 @@
// Phase 3: Prepare use counts for nodes.
-class PrepareUsesVisitor : public NullNodeVisitor {
+class PrepareUsesVisitor {
public:
explicit PrepareUsesVisitor(Scheduler* scheduler)
: scheduler_(scheduler), schedule_(scheduler->schedule_) {}
@@ -1080,14 +1149,14 @@
scheduler_->schedule_root_nodes_.push_back(node);
if (!schedule_->IsScheduled(node)) {
// Make sure root nodes are scheduled in their respective blocks.
- Trace("Scheduling fixed position node #%d:%s\n", node->id(),
+ TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
node->op()->mnemonic());
IrOpcode::Value opcode = node->opcode();
BasicBlock* block =
opcode == IrOpcode::kParameter
? schedule_->start()
: schedule_->block(NodeProperties::GetControlInput(node));
- DCHECK(block != NULL);
+ DCHECK_NOT_NULL(block);
schedule_->AddNode(block, node);
}
}
@@ -1110,12 +1179,31 @@
void Scheduler::PrepareUses() {
- Trace("--- PREPARE USES -------------------------------------------\n");
+ TRACE("--- PREPARE USES -------------------------------------------\n");
- // Count the uses of every node, it will be used to ensure that all of a
+  // Count the uses of every node; these counts are used to ensure that all of a
// node's uses are scheduled before the node itself.
PrepareUsesVisitor prepare_uses(this);
- graph_->VisitNodeInputsFromEnd(&prepare_uses);
+
+ // TODO(turbofan): simplify the careful pre/post ordering here.
+ BoolVector visited(graph_->NodeCount(), false, zone_);
+ ZoneStack<Node::InputEdges::iterator> stack(zone_);
+ Node* node = graph_->end();
+ prepare_uses.Pre(node);
+ visited[node->id()] = true;
+ stack.push(node->input_edges().begin());
+ while (!stack.empty()) {
+ Edge edge = *stack.top();
+ Node* node = edge.to();
+ if (visited[node->id()]) {
+ prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
+ if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
+ } else {
+ prepare_uses.Pre(node);
+ visited[node->id()] = true;
+ if (node->InputCount() > 0) stack.push(node->input_edges().begin());
+ }
+ }
}
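
The explicit-stack walk above replaces the old NullNodeVisitor-based
VisitNodeInputsFromEnd traversal. The same pre/post pattern in isolation, as a
generic sketch (Node here is any type providing id(), InputCount() and
InputAt(), not the V8 class; all names are illustrative):

  #include <stack>
  #include <utility>
  #include <vector>

  // Pre fires once per node before its inputs are explored; PostEdge fires
  // once per edge, at the moment its target is known to be visited.
  template <typename Node, typename Pre, typename PostEdge>
  void WalkInputsFromEnd(Node* end, size_t node_count, Pre pre,
                         PostEdge post_edge) {
    std::vector<bool> visited(node_count, false);
    std::stack<std::pair<Node*, int>> stack;  // (node, current input index)
    pre(end);
    visited[end->id()] = true;
    if (end->InputCount() > 0) stack.push({end, 0});
    while (!stack.empty()) {
      Node* from = stack.top().first;
      Node* to = from->InputAt(stack.top().second);
      if (visited[to->id()]) {
        // As in the code above, an edge is only consumed once its target has
        // been visited, so every edge gets exactly one PostEdge call.
        post_edge(from, stack.top().second, to);
        if (++stack.top().second == from->InputCount()) stack.pop();
      } else {
        pre(to);
        visited[to->id()] = true;
        if (to->InputCount() > 0) stack.push({to, 0});
      }
    }
  }
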
@@ -1130,8 +1218,8 @@
// Run the schedule early algorithm on a set of fixed root nodes.
void Run(NodeVector* roots) {
- for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) {
- queue_.push(*i);
+ for (Node* const root : *roots) {
+ queue_.push(root);
while (!queue_.empty()) {
VisitNode(queue_.front());
queue_.pop();
@@ -1148,7 +1236,7 @@
// Fixed nodes already know their schedule early position.
if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
data->minimum_block_ = schedule_->block(node);
- Trace("Fixing #%d:%s minimum_block = B%d, dominator_depth = %d\n",
+ TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
node->id(), node->op()->mnemonic(),
data->minimum_block_->id().ToInt(),
data->minimum_block_->dominator_depth());
@@ -1158,10 +1246,9 @@
if (data->minimum_block_ == schedule_->start()) return;
// Propagate schedule early position.
- DCHECK(data->minimum_block_ != NULL);
- Node::Uses uses = node->uses();
- for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
- PropagateMinimumPositionToNode(data->minimum_block_, *i);
+ DCHECK_NOT_NULL(data->minimum_block_);
+ for (auto use : node->uses()) {
+ PropagateMinimumPositionToNode(data->minimum_block_, use);
}
}
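
Concretely, the minimum block flows from a node to its uses, and each use
keeps the deepest candidate in the dominator tree. A hypothetical case: a pure
node with one input fixed in the start block (depth 0) and another fixed in a
branch arm (depth 2) gets the branch arm as its schedule-early position; a
node can never float above the deepest minimum block among its inputs.
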
@@ -1187,7 +1274,7 @@
if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
data->minimum_block_ = block;
queue_.push(node);
- Trace("Propagating #%d:%s minimum_block = B%d, dominator_depth = %d\n",
+ TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
node->id(), node->op()->mnemonic(),
data->minimum_block_->id().ToInt(),
data->minimum_block_->dominator_depth());
@@ -1196,7 +1283,7 @@
#if DEBUG
bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
- BasicBlock* dominator = scheduler_->GetCommonDominator(b1, b2);
+ BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
return dominator == b1 || dominator == b2;
}
#endif
@@ -1208,13 +1295,13 @@
void Scheduler::ScheduleEarly() {
- Trace("--- SCHEDULE EARLY -----------------------------------------\n");
+ TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
- Trace("roots: ");
+ TRACE("roots: ");
for (Node* node : schedule_root_nodes_) {
- Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+ TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
}
- Trace("\n");
+ TRACE("\n");
}
// Compute the minimum block for each node thereby determining the earliest
@@ -1231,12 +1318,15 @@
class ScheduleLateNodeVisitor {
public:
ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
- : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
+ : scheduler_(scheduler),
+ schedule_(scheduler_->schedule_),
+ marked_(scheduler->zone_),
+ marking_queue_(scheduler->zone_) {}
// Run the schedule late algorithm on a set of fixed root nodes.
void Run(NodeVector* roots) {
- for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) {
- ProcessQueue(*i);
+ for (Node* const root : *roots) {
+ ProcessQueue(root);
}
}
@@ -1253,10 +1343,11 @@
if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
queue->push(node);
- while (!queue->empty()) {
- VisitNode(queue->front());
+ do {
+ Node* const node = queue->front();
queue->pop();
- }
+ VisitNode(node);
+ } while (!queue->empty());
}
}
@@ -1272,86 +1363,213 @@
// Determine the dominating block for all of the uses of this node. It is
// the latest block that this node can be scheduled in.
- Trace("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
+ TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
BasicBlock* block = GetCommonDominatorOfUses(node);
DCHECK_NOT_NULL(block);
// The schedule early block dominates the schedule late block.
BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
- DCHECK_EQ(min_block, scheduler_->GetCommonDominator(block, min_block));
- Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum = B%d\n",
- node->id(), node->op()->mnemonic(), block->id().ToInt(),
- block->loop_depth(), min_block->id().ToInt());
+ DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
+ TRACE(
+ "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
+ node->id(), node->op()->mnemonic(), block->id().ToInt(),
+ block->loop_depth(), min_block->id().ToInt());
// Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
    // into enclosing loop pre-headers until they would precede their schedule
// early position.
- BasicBlock* hoist_block = GetPreHeader(block);
- while (hoist_block != NULL &&
- hoist_block->dominator_depth() >= min_block->dominator_depth()) {
- Trace(" hoisting #%d:%s to block B%d\n", node->id(),
- node->op()->mnemonic(), hoist_block->id().ToInt());
- DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
- block = hoist_block;
- hoist_block = GetPreHeader(hoist_block);
+ BasicBlock* hoist_block = GetHoistBlock(block);
+ if (hoist_block &&
+ hoist_block->dominator_depth() >= min_block->dominator_depth()) {
+ do {
+ TRACE(" hoisting #%d:%s to block id:%d\n", node->id(),
+ node->op()->mnemonic(), hoist_block->id().ToInt());
+ DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
+ block = hoist_block;
+ hoist_block = GetHoistBlock(hoist_block);
+ } while (hoist_block &&
+ hoist_block->dominator_depth() >= min_block->dominator_depth());
+ } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
+ // Split the {node} if beneficial and return the new {block} for it.
+ block = SplitNode(block, node);
}
// Schedule the node or a floating control structure.
- if (NodeProperties::IsControl(node)) {
+ if (IrOpcode::IsMergeOpcode(node->opcode())) {
ScheduleFloatingControl(block, node);
+ } else if (node->opcode() == IrOpcode::kFinishRegion) {
+ ScheduleRegion(block, node);
} else {
ScheduleNode(block, node);
}
}
- BasicBlock* GetPreHeader(BasicBlock* block) {
- if (block->IsLoopHeader()) {
- return block->dominator();
- } else if (block->loop_header() != NULL) {
- return block->loop_header()->dominator();
- } else {
- return NULL;
+  // Mark {block} and push its non-marked predecessors onto the marking queue.
+ void MarkBlock(BasicBlock* block) {
+ DCHECK_LT(block->id().ToSize(), marked_.size());
+ marked_[block->id().ToSize()] = true;
+ for (BasicBlock* pred_block : block->predecessors()) {
+ DCHECK_LT(pred_block->id().ToSize(), marked_.size());
+ if (marked_[pred_block->id().ToSize()]) continue;
+ marking_queue_.push_back(pred_block);
}
}
- BasicBlock* GetCommonDominatorOfUses(Node* node) {
- BasicBlock* block = NULL;
+ BasicBlock* SplitNode(BasicBlock* block, Node* node) {
+ // For now, we limit splitting to pure nodes.
+ if (!node->op()->HasProperty(Operator::kPure)) return block;
+ // TODO(titzer): fix the special case of splitting of projections.
+ if (node->opcode() == IrOpcode::kProjection) return block;
+
+    // The {block} is the common dominator of all uses of {node}, so we cannot
+ // split anything unless the {block} has at least two successors.
+ DCHECK_EQ(block, GetCommonDominatorOfUses(node));
+ if (block->SuccessorCount() < 2) return block;
+
+ // Clear marking bits.
+ DCHECK(marking_queue_.empty());
+ std::fill(marked_.begin(), marked_.end(), false);
+ marked_.resize(schedule_->BasicBlockCount() + 1, false);
+
+ // Check if the {node} has uses in {block}.
for (Edge edge : node->use_edges()) {
BasicBlock* use_block = GetBlockForUse(edge);
- block = block == NULL ? use_block : use_block == NULL
- ? block
- : scheduler_->GetCommonDominator(
- block, use_block);
+ if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
+ if (use_block == block) {
+ TRACE(" not splitting #%d:%s, it is used in id:%d\n", node->id(),
+ node->op()->mnemonic(), block->id().ToInt());
+ marking_queue_.clear();
+ return block;
+ }
+ MarkBlock(use_block);
+ }
+
+ // Compute transitive marking closure; a block is marked if all its
+ // successors are marked.
+ do {
+ BasicBlock* top_block = marking_queue_.front();
+ marking_queue_.pop_front();
+ if (marked_[top_block->id().ToSize()]) continue;
+ bool marked = true;
+ for (BasicBlock* successor : top_block->successors()) {
+ if (!marked_[successor->id().ToSize()]) {
+ marked = false;
+ break;
+ }
+ }
+ if (marked) MarkBlock(top_block);
+ } while (!marking_queue_.empty());
+
+ // If the (common dominator) {block} is marked, we know that all paths from
+ // {block} to the end contain at least one use of {node}, and hence there's
+ // no point in splitting the {node} in this case.
+ if (marked_[block->id().ToSize()]) {
+ TRACE(" not splitting #%d:%s, its common dominator id:%d is perfect\n",
+ node->id(), node->op()->mnemonic(), block->id().ToInt());
+ return block;
+ }
+
+    // Split {node} for uses according to the previously computed marking
+    // closure. Every marking partition has a unique dominator, which gets a
+    // copy of the {node}, with the exception of the first partition, which
+    // gets the {node} itself.
+ ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
+ for (Edge edge : node->use_edges()) {
+ BasicBlock* use_block = GetBlockForUse(edge);
+ if (use_block == nullptr) continue;
+ while (marked_[use_block->dominator()->id().ToSize()]) {
+ use_block = use_block->dominator();
+ }
+ auto& use_node = dominators[use_block];
+ if (use_node == nullptr) {
+ if (dominators.size() == 1u) {
+ // Place the {node} at {use_block}.
+ block = use_block;
+ use_node = node;
+ TRACE(" pushing #%d:%s down to id:%d\n", node->id(),
+ node->op()->mnemonic(), block->id().ToInt());
+ } else {
+ // Place a copy of {node} at {use_block}.
+ use_node = CloneNode(node);
+ TRACE(" cloning #%d:%s for id:%d\n", use_node->id(),
+ use_node->op()->mnemonic(), use_block->id().ToInt());
+ scheduler_->schedule_queue_.push(use_node);
+ }
+ }
+ edge.UpdateTo(use_node);
}
return block;
}
+ BasicBlock* GetHoistBlock(BasicBlock* block) {
+ if (block->IsLoopHeader()) return block->dominator();
+    // We have to check that the {block} dominates all of the
+    // outgoing blocks. If it doesn't, then there is a path
+ // out of the loop which does not execute this {block}, so we
+ // can't hoist operations from this {block} out of the loop, as
+ // that would introduce additional computations.
+ if (BasicBlock* header_block = block->loop_header()) {
+ for (BasicBlock* outgoing_block :
+ scheduler_->special_rpo_->GetOutgoingBlocks(header_block)) {
+ if (BasicBlock::GetCommonDominator(block, outgoing_block) != block) {
+ return nullptr;
+ }
+ }
+ return header_block->dominator();
+ }
+ return nullptr;
+ }
+
+ BasicBlock* GetCommonDominatorOfUses(Node* node) {
+ BasicBlock* block = nullptr;
+ for (Edge edge : node->use_edges()) {
+ BasicBlock* use_block = GetBlockForUse(edge);
+ block = block == nullptr
+ ? use_block
+ : use_block == nullptr
+ ? block
+ : BasicBlock::GetCommonDominator(block, use_block);
+ }
+ return block;
+ }
+
+ BasicBlock* FindPredecessorBlock(Node* node) {
+ return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
+ }
+
BasicBlock* GetBlockForUse(Edge edge) {
Node* use = edge.from();
- IrOpcode::Value opcode = use->opcode();
- if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+ if (IrOpcode::IsPhiOpcode(use->opcode())) {
// If the use is from a coupled (i.e. floating) phi, compute the common
// dominator of its uses. This will not recurse more than one level.
if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
- Trace(" inspecting uses of coupled #%d:%s\n", use->id(),
+ TRACE(" inspecting uses of coupled #%d:%s\n", use->id(),
use->op()->mnemonic());
DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
return GetCommonDominatorOfUses(use);
}
- // If the use is from a fixed (i.e. non-floating) phi, use the block
- // of the corresponding control input to the merge.
+ // If the use is from a fixed (i.e. non-floating) phi, we use the
+ // predecessor block of the corresponding control input to the merge.
if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
- Trace(" input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
+ TRACE(" input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
use->op()->mnemonic());
Node* merge = NodeProperties::GetControlInput(use, 0);
- opcode = merge->opcode();
- DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
- use = NodeProperties::GetControlInput(merge, edge.index());
+ DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
+ Node* input = NodeProperties::GetControlInput(merge, edge.index());
+ return FindPredecessorBlock(input);
+ }
+ } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
+ // If the use is from a fixed (i.e. non-floating) merge, we use the
+ // predecessor block of the current input to the merge.
+ if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
+ TRACE(" input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
+ use->op()->mnemonic());
+ return FindPredecessorBlock(edge.to());
}
}
BasicBlock* result = schedule_->block(use);
- if (result == NULL) return NULL;
- Trace(" must dominate use #%d:%s in B%d\n", use->id(),
+ if (result == nullptr) return nullptr;
+ TRACE(" must dominate use #%d:%s in id:%d\n", use->id(),
use->op()->mnemonic(), result->id().ToInt());
return result;
}
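
The fixed-phi case above is the subtle one: a value flowing into a phi is
needed at the end of the corresponding predecessor block, not in the block of
the merge itself. Schematically (hypothetical nodes and blocks):

  //        B1            B2
  //          \          /
  //      v1   \        /   v2
  //            \      /
  //           Merge (B3)
  //       Phi(v1, v2, Merge)
  //
  // The phi's use of v1 is attributed to B1 (the predecessor block of the
  // merge's first control input), so v1 can be scheduled into B1 even
  // though the phi itself lives in B3.
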
@@ -1360,25 +1578,70 @@
scheduler_->FuseFloatingControl(block, node);
}
+ void ScheduleRegion(BasicBlock* block, Node* region_end) {
+    // We only allow regions of instructions connected into a linear
+    // effect chain. The only value a node in the chain may produce is
+    // the value consumed by the FinishRegion node.
+
+ // We schedule back to front; we first schedule FinishRegion.
+ CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
+ ScheduleNode(block, region_end);
+
+ // Schedule the chain.
+ Node* node = NodeProperties::GetEffectInput(region_end);
+ while (node->opcode() != IrOpcode::kBeginRegion) {
+ DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
+ DCHECK_EQ(1, node->op()->EffectInputCount());
+ DCHECK_EQ(1, node->op()->EffectOutputCount());
+ DCHECK_EQ(0, node->op()->ControlOutputCount());
+ // The value output (if there is any) must be consumed
+ // by the EndRegion node.
+ DCHECK(node->op()->ValueOutputCount() == 0 ||
+ node == region_end->InputAt(0));
+ ScheduleNode(block, node);
+ node = NodeProperties::GetEffectInput(node);
+ }
+ // Schedule the BeginRegion node.
+ DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
+ ScheduleNode(block, node);
+ }
+
void ScheduleNode(BasicBlock* block, Node* node) {
schedule_->PlanNode(block, node);
scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
}
+ Node* CloneNode(Node* node) {
+ int const input_count = node->InputCount();
+ for (int index = 0; index < input_count; ++index) {
+ Node* const input = node->InputAt(index);
+ scheduler_->IncrementUnscheduledUseCount(input, index, node);
+ }
+ Node* const copy = scheduler_->graph_->CloneNode(node);
+ TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
+ copy->id());
+ scheduler_->node_data_.resize(copy->id() + 1,
+ scheduler_->DefaultSchedulerData());
+ scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
+ return copy;
+ }
+
Scheduler* scheduler_;
Schedule* schedule_;
+ BoolVector marked_;
+ ZoneDeque<BasicBlock*> marking_queue_;
};
void Scheduler::ScheduleLate() {
- Trace("--- SCHEDULE LATE ------------------------------------------\n");
+ TRACE("--- SCHEDULE LATE ------------------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
- Trace("roots: ");
+ TRACE("roots: ");
for (Node* node : schedule_root_nodes_) {
- Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+ TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
}
- Trace("\n");
+ TRACE("\n");
}
// Schedule: Places nodes in dominator block of all their uses.
@@ -1392,7 +1655,7 @@
void Scheduler::SealFinalSchedule() {
- Trace("--- SEAL FINAL SCHEDULE ------------------------------------\n");
+ TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");
// Serialize the assembly order and reverse-post-order numbering.
special_rpo_->SerializeRPOIntoSchedule();
@@ -1403,8 +1666,8 @@
for (NodeVector& nodes : scheduled_nodes_) {
BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
BasicBlock* block = schedule_->GetBlockById(id);
- for (NodeVectorRIter i = nodes.rbegin(); i != nodes.rend(); ++i) {
- schedule_->AddNode(block, *i);
+ for (Node* node : base::Reversed(nodes)) {
+ schedule_->AddNode(block, node);
}
}
}
@@ -1414,7 +1677,7 @@
void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
- Trace("--- FUSE FLOATING CONTROL ----------------------------------\n");
+ TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
OFStream os(stdout);
os << "Schedule before control flow fusion:\n" << *schedule_;
@@ -1426,9 +1689,9 @@
// Iterate on phase 2: Compute special RPO and dominator tree.
special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
// TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
- for (BasicBlock* b = block->rpo_next(); b != NULL; b = b->rpo_next()) {
+ for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
b->set_dominator_depth(-1);
- b->set_dominator(NULL);
+ b->set_dominator(nullptr);
}
PropagateImmediateDominators(block->rpo_next());
@@ -1439,18 +1702,15 @@
NodeVector propagation_roots(control_flow_builder_->control_);
for (Node* node : control_flow_builder_->control_) {
for (Node* use : node->uses()) {
- if (use->opcode() == IrOpcode::kPhi ||
- use->opcode() == IrOpcode::kEffectPhi) {
- propagation_roots.push_back(use);
- }
+ if (NodeProperties::IsPhi(use)) propagation_roots.push_back(use);
}
}
if (FLAG_trace_turbo_scheduler) {
- Trace("propagation roots: ");
+ TRACE("propagation roots: ");
for (Node* node : propagation_roots) {
- Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+ TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
}
- Trace("\n");
+ TRACE("\n");
}
ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
schedule_early_visitor.Run(&propagation_roots);
@@ -1468,12 +1728,12 @@
void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
- Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(),
+ TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
to->id().ToInt());
NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
- for (NodeVectorIter i = nodes->begin(); i != nodes->end(); ++i) {
- schedule_->SetBlockForNode(to, *i);
- scheduled_nodes_[to->id().ToSize()].push_back(*i);
+ for (Node* const node : *nodes) {
+ schedule_->SetBlockForNode(to, node);
+ scheduled_nodes_[to->id().ToSize()].push_back(node);
}
nodes->clear();
}
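
Taken together, the phases in this file run in a fixed order from the static
entry point. The first two calls are visible in the ComputeSchedule hunk near
the top of this file; the remaining ones follow the phase structure above, so
treat this as a reconstruction rather than the verbatim body:

  Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
    Schedule* schedule = new (graph->zone())
        Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
    Scheduler scheduler(zone, graph, schedule, flags);
    scheduler.BuildCFG();                        // Phase 1: build the CFG.
    scheduler.ComputeSpecialRPONumbering();      // Phase 2: special RPO ...
    scheduler.GenerateImmediateDominatorTree();  //   ... and dominator tree.
    scheduler.PrepareUses();                     // Phase 3: use counts.
    scheduler.ScheduleEarly();                   // Phase 4: earliest blocks.
    scheduler.ScheduleLate();                    // Phase 5: final placement.
    scheduler.SealFinalSchedule();               // Phase 6: seal the order.
    return schedule;
  }
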
diff --git a/src/compiler/scheduler.h b/src/compiler/scheduler.h
index 9da0b6d..269c271 100644
--- a/src/compiler/scheduler.h
+++ b/src/compiler/scheduler.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_SCHEDULER_H_
#define V8_COMPILER_SCHEDULER_H_
-#include "src/v8.h"
-
+#include "src/base/flags.h"
+#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/schedule.h"
#include "src/compiler/zone-pool.h"
@@ -16,17 +16,24 @@
namespace internal {
namespace compiler {
+// Forward declarations.
class CFGBuilder;
class ControlEquivalence;
+class Graph;
class SpecialRPONumberer;
+
// Computes a schedule from a graph, placing nodes into basic blocks and
// ordering the basic blocks in the special RPO order.
class Scheduler {
public:
+ // Flags that control the mode of operation.
+ enum Flag { kNoFlags = 0u, kSplitNodes = 1u << 1 };
+ typedef base::Flags<Flag> Flags;
+
// The complete scheduling algorithm. Creates a new schedule and places all
// nodes from the graph into it.
- static Schedule* ComputeSchedule(Zone* zone, Graph* graph);
+ static Schedule* ComputeSchedule(Zone* zone, Graph* graph, Flags flags);
// Compute the RPO of blocks in an existing schedule.
static BasicBlockVector* ComputeSpecialRPO(Zone* zone, Schedule* schedule);
@@ -56,6 +63,7 @@
Zone* zone_;
Graph* graph_;
Schedule* schedule_;
+ Flags flags_;
NodeVectorVector scheduled_nodes_; // Per-block list of nodes in reverse.
NodeVector schedule_root_nodes_; // Fixed root nodes seed the worklist.
ZoneQueue<Node*> schedule_queue_; // Worklist of schedulable nodes.
@@ -64,7 +72,7 @@
SpecialRPONumberer* special_rpo_; // Special RPO numbering of blocks.
ControlEquivalence* equivalence_; // Control dependence equivalence.
- Scheduler(Zone* zone, Graph* graph, Schedule* schedule);
+ Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags);
inline SchedulerData DefaultSchedulerData();
inline SchedulerData* GetData(Node* node);
@@ -76,7 +84,6 @@
void IncrementUnscheduledUseCount(Node* node, int index, Node* from);
void DecrementUnscheduledUseCount(Node* node, int index, Node* from);
- BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
void PropagateImmediateDominators(BasicBlock* block);
// Phase 1: Build control-flow graph.
@@ -107,6 +114,9 @@
void MovePlannedNodes(BasicBlock* from, BasicBlock* to);
};
+
+DEFINE_OPERATORS_FOR_FLAGS(Scheduler::Flags)
+
} // namespace compiler
} // namespace internal
} // namespace v8
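
Illustrative sketch (not part of the diff): the new Flags type composes
through the operators generated by DEFINE_OPERATORS_FOR_FLAGS above, so a
caller opting into node splitting would look roughly like this; the
FLAG_turbo_splitting guard is an assumption about the pipeline wiring:

  Scheduler::Flags flags = Scheduler::kNoFlags;
  if (FLAG_turbo_splitting) flags |= Scheduler::kSplitNodes;
  Schedule* schedule = Scheduler::ComputeSchedule(zone, graph, flags);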
diff --git a/src/compiler/select-lowering.cc b/src/compiler/select-lowering.cc
index edecf58..0e8b36f 100644
--- a/src/compiler/select-lowering.cc
+++ b/src/compiler/select-lowering.cc
@@ -8,6 +8,7 @@
#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -51,10 +52,10 @@
}
// Create a Phi hanging off the previously determined merge.
- node->set_op(common()->Phi(p.type(), 2));
node->ReplaceInput(0, vthen);
node->ReplaceInput(1, velse);
node->ReplaceInput(2, merge);
+ NodeProperties::ChangeOp(node, common()->Phi(p.representation(), 2));
return Changed(node);
}
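
Illustrative aside (not part of the diff): given the Diamond built earlier
in this function, the net shape the reducer produces is

  Select(cond, vtrue, vfalse)
    =>  Branch(cond) -> IfTrue / IfFalse -> Merge
        Phi[rep](vtrue, vfalse, Merge)   // the reused Select node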
@@ -62,7 +63,7 @@
bool SelectLowering::ReachableFrom(Node* const sink, Node* const source) {
// TODO(turbofan): This is probably horribly expensive, and it should be moved
// into node.h or somewhere else?!
- Zone zone(graph()->zone()->isolate());
+ Zone zone;
std::queue<Node*, NodeDeque> queue((NodeDeque(&zone)));
BoolVector visited(graph()->NodeCount(), false, &zone);
queue.push(source);
diff --git a/src/compiler/select-lowering.h b/src/compiler/select-lowering.h
index 05ea0e0..5894d35 100644
--- a/src/compiler/select-lowering.h
+++ b/src/compiler/select-lowering.h
@@ -20,12 +20,12 @@
// Lowers Select nodes to diamonds.
-class SelectLowering FINAL : public Reducer {
+class SelectLowering final : public Reducer {
public:
SelectLowering(Graph* graph, CommonOperatorBuilder* common);
~SelectLowering();
- Reduction Reduce(Node* node) OVERRIDE;
+ Reduction Reduce(Node* node) override;
private:
typedef std::multimap<Node*, Node*, std::less<Node*>,
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index 1461709..653fea8 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -10,22 +10,25 @@
#include "src/code-factory.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/diamond.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
#include "src/compiler/representation-change.h"
-#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/source-position.h"
#include "src/objects.h"
+#include "src/type-cache.h"
namespace v8 {
namespace internal {
namespace compiler {
// Macro for outputting trace information from representation inference.
-#define TRACE(x) \
- if (FLAG_trace_representation) PrintF x
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_representation) PrintF(__VA_ARGS__); \
+ } while (false)
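
Illustrative aside (not part of the diff): the do { ... } while (false)
wrapper makes the variadic macro expand to a single statement, so it
composes safely with if/else. A bare `if (FLAG_...) PrintF(...)` expansion
would instead capture a following else:

  if (done)
    TRACE("done\n");       // expands to one statement plus the ';'
  else
    TRACE("retrying\n");   // the else still binds to the outer if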
// Representation selection and lowering of {Simplified} operators to machine
// operators are intertwined. We use a fixpoint calculation to compute both the
@@ -54,64 +57,329 @@
};
+namespace {
+
+// The {UseInfo} class is used to describe a use of an input of a node.
+//
+// This information is used in two different ways, based on the phase:
+//
+// 1. During propagation, the use info is used to inform the input node
+// about what part of the input is used (we call this truncation) and what
+// is the preferred representation.
+//
+// 2. During lowering, the use info is used to properly convert the input
+// to the preferred representation. The preferred representation might be
+// insufficient to do the conversion (e.g. a word32->float64 conversion), so
+// we also need the signedness information to produce the correct value.
+class UseInfo {
+ public:
+ UseInfo(MachineRepresentation preferred, Truncation truncation)
+ : preferred_(preferred), truncation_(truncation) {}
+ static UseInfo TruncatingWord32() {
+ return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
+ }
+ static UseInfo TruncatingWord64() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
+ }
+ static UseInfo Bool() {
+ return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
+ }
+ static UseInfo Float32() {
+ return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
+ }
+ static UseInfo Float64() {
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
+ }
+ static UseInfo PointerInt() {
+ return kPointerSize == 4 ? TruncatingWord32() : TruncatingWord64();
+ }
+ static UseInfo AnyTagged() {
+ return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
+ }
+
+ // Undetermined representation.
+ static UseInfo Any() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::Any());
+ }
+ static UseInfo None() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::None());
+ }
+
+ // Truncation to a representation that is smaller than the preferred
+ // one.
+ static UseInfo Float64TruncatingToWord32() {
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Word32());
+ }
+ static UseInfo Word64TruncatingToWord32() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word32());
+ }
+ static UseInfo AnyTruncatingToBool() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::Bool());
+ }
+
+ MachineRepresentation preferred() const { return preferred_; }
+ Truncation truncation() const { return truncation_; }
+
+ private:
+ MachineRepresentation preferred_;
+ Truncation truncation_;
+};
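
Illustrative sketch (not part of the diff): a visitor requests uses via
these factories. During propagation the call records the truncation on the
input; during lowering the same call inserts a representation change if one
is needed (ProcessInput appears later in this file):

  // Only the low 32 bits of input 0 matter; input 1 stays tagged.
  ProcessInput(node, 0, UseInfo::TruncatingWord32());
  ProcessInput(node, 1, UseInfo::AnyTagged());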
+
+
+UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kTagged:
+ return UseInfo::AnyTagged();
+ case MachineRepresentation::kFloat64:
+ return UseInfo::Float64();
+ case MachineRepresentation::kFloat32:
+ return UseInfo::Float32();
+ case MachineRepresentation::kWord64:
+ return UseInfo::TruncatingWord64();
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return UseInfo::TruncatingWord32();
+ case MachineRepresentation::kBit:
+ return UseInfo::Bool();
+ case MachineRepresentation::kNone:
+ break;
+ }
+ UNREACHABLE();
+ return UseInfo::None();
+}
+
+
+UseInfo UseInfoForBasePointer(const FieldAccess& access) {
+ return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::PointerInt();
+}
+
+
+UseInfo UseInfoForBasePointer(const ElementAccess& access) {
+ return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::PointerInt();
+}
+
+
+#ifdef DEBUG
+// Helpers for monotonicity checking.
+bool MachineRepresentationIsSubtype(MachineRepresentation r1,
+ MachineRepresentation r2) {
+ switch (r1) {
+ case MachineRepresentation::kNone:
+ return true;
+ case MachineRepresentation::kBit:
+ return r2 == MachineRepresentation::kBit ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kWord8:
+ return r2 == MachineRepresentation::kWord8 ||
+ r2 == MachineRepresentation::kWord16 ||
+ r2 == MachineRepresentation::kWord32 ||
+ r2 == MachineRepresentation::kWord64 ||
+ r2 == MachineRepresentation::kFloat32 ||
+ r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kWord16:
+ return r2 == MachineRepresentation::kWord16 ||
+ r2 == MachineRepresentation::kWord32 ||
+ r2 == MachineRepresentation::kWord64 ||
+ r2 == MachineRepresentation::kFloat32 ||
+ r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kWord32:
+ return r2 == MachineRepresentation::kWord32 ||
+ r2 == MachineRepresentation::kWord64 ||
+ r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kWord64:
+ return r2 == MachineRepresentation::kWord64;
+ case MachineRepresentation::kFloat32:
+ return r2 == MachineRepresentation::kFloat32 ||
+ r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kFloat64:
+ return r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kTagged:
+ return r2 == MachineRepresentation::kTagged;
+ }
+ UNREACHABLE();
+ return false;
+}
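
Illustrative aside (not part of the diff): the relation is a lattice with
kNone at the bottom; kTagged sits above everything except kWord64, which is
a subtype only of itself. For instance:

  DCHECK(MachineRepresentationIsSubtype(MachineRepresentation::kWord8,
                                        MachineRepresentation::kWord32));
  DCHECK(!MachineRepresentationIsSubtype(MachineRepresentation::kWord64,
                                         MachineRepresentation::kTagged));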
+
+
+class InputUseInfos {
+ public:
+ explicit InputUseInfos(Zone* zone) : input_use_infos_(zone) {}
+
+ void SetAndCheckInput(Node* node, int index, UseInfo use_info) {
+ if (input_use_infos_.empty()) {
+ input_use_infos_.resize(node->InputCount(), UseInfo::None());
+ }
+ // Check that the new use information is a super-type of the old
+ // one.
+ CHECK(IsUseLessGeneral(input_use_infos_[index], use_info));
+ input_use_infos_[index] = use_info;
+ }
+
+ private:
+ ZoneVector<UseInfo> input_use_infos_;
+
+ static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
+ return MachineRepresentationIsSubtype(use1.preferred(), use2.preferred()) &&
+ use1.truncation().IsLessGeneralThan(use2.truncation());
+ }
+};
+
+#endif // DEBUG
+
+} // namespace
+
+
class RepresentationSelector {
public:
// Information for each node tracked during the fixpoint.
- struct NodeInfo {
- MachineTypeUnion use : 15; // Union of all usages for the node.
- bool queued : 1; // Bookkeeping for the traversal.
- bool visited : 1; // Bookkeeping for the traversal.
- MachineTypeUnion output : 15; // Output type of the node.
+ class NodeOutputInfo {
+ public:
+ NodeOutputInfo(MachineRepresentation representation, Type* type)
+ : type_(type), representation_(representation) {}
+ NodeOutputInfo()
+ : type_(Type::None()), representation_(MachineRepresentation::kNone) {}
+
+ MachineRepresentation representation() const { return representation_; }
+ Type* type() const { return type_; }
+
+ static NodeOutputInfo None() {
+ return NodeOutputInfo(MachineRepresentation::kNone, Type::None());
+ }
+
+ static NodeOutputInfo Float32() {
+ return NodeOutputInfo(MachineRepresentation::kFloat32, Type::Number());
+ }
+
+ static NodeOutputInfo Float64() {
+ return NodeOutputInfo(MachineRepresentation::kFloat64, Type::Number());
+ }
+
+ static NodeOutputInfo NumberTruncatedToWord32() {
+ return NodeOutputInfo(MachineRepresentation::kWord32, Type::Number());
+ }
+
+ static NodeOutputInfo Int32() {
+ return NodeOutputInfo(MachineRepresentation::kWord32, Type::Signed32());
+ }
+
+ static NodeOutputInfo Uint32() {
+ return NodeOutputInfo(MachineRepresentation::kWord32, Type::Unsigned32());
+ }
+
+ static NodeOutputInfo Bool() {
+ return NodeOutputInfo(MachineRepresentation::kBit, Type::Boolean());
+ }
+
+ static NodeOutputInfo Int64() {
+ // TODO(jarin) Fix once we have a real int64 type.
+ return NodeOutputInfo(MachineRepresentation::kWord64, Type::Internal());
+ }
+
+ static NodeOutputInfo Uint64() {
+ // TODO(jarin) Fix once we have a real uint64 type.
+ return NodeOutputInfo(MachineRepresentation::kWord64, Type::Internal());
+ }
+
+ static NodeOutputInfo AnyTagged() {
+ return NodeOutputInfo(MachineRepresentation::kTagged, Type::Any());
+ }
+
+ static NodeOutputInfo NumberTagged() {
+ return NodeOutputInfo(MachineRepresentation::kTagged, Type::Number());
+ }
+
+ static NodeOutputInfo Pointer() {
+ return NodeOutputInfo(MachineType::PointerRepresentation(), Type::Any());
+ }
+
+ private:
+ Type* type_;
+ MachineRepresentation representation_;
+ };
+
+ class NodeInfo {
+ public:
+ // Adds a new use to the node. Returns true if something has changed
+ // and the node has to be requeued.
+ bool AddUse(UseInfo info) {
+ Truncation old_truncation = truncation_;
+ truncation_ = Truncation::Generalize(truncation_, info.truncation());
+ return truncation_ != old_truncation;
+ }
+
+ void set_queued(bool value) { queued_ = value; }
+ bool queued() const { return queued_; }
+ void set_visited() { visited_ = true; }
+ bool visited() const { return visited_; }
+ Truncation truncation() const { return truncation_; }
+ void set_output_type(NodeOutputInfo output) { output_ = output; }
+
+ Type* output_type() const { return output_.type(); }
+ MachineRepresentation representation() const {
+ return output_.representation();
+ }
+
+ private:
+ bool queued_ = false; // Bookkeeping for the traversal.
+ bool visited_ = false; // Bookkeeping for the traversal.
+ NodeOutputInfo output_; // Output type and representation.
+ Truncation truncation_ = Truncation::None(); // Information about uses.
};
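
Illustrative sketch (not part of the diff): AddUse() is what drives the
fixpoint. A worklist step re-queues a node only when generalizing its
truncation actually changed something, mirroring EnqueueInput below:

  NodeInfo* info = GetInfo(input);
  if (info->AddUse(use_info) && !info->queued()) {
    info->set_queued(true);
    queue_.push(input);
  }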
RepresentationSelector(JSGraph* jsgraph, Zone* zone,
- RepresentationChanger* changer)
+ RepresentationChanger* changer,
+ SourcePositionTable* source_positions)
: jsgraph_(jsgraph),
count_(jsgraph->graph()->NodeCount()),
- info_(zone->NewArray<NodeInfo>(count_)),
+ info_(count_, zone),
+#ifdef DEBUG
+ node_input_use_infos_(count_, InputUseInfos(zone), zone),
+#endif
nodes_(zone),
replacements_(zone),
phase_(PROPAGATE),
changer_(changer),
- queue_(zone) {
- memset(info_, 0, sizeof(NodeInfo) * count_);
-
- Factory* f = zone->isolate()->factory();
- safe_bit_range_ =
- Type::Union(Type::Boolean(),
- Type::Range(f->NewNumber(0), f->NewNumber(1), zone), zone);
- safe_int_additive_range_ =
- Type::Range(f->NewNumber(-std::pow(2.0, 52.0)),
- f->NewNumber(std::pow(2.0, 52.0)), zone);
+ queue_(zone),
+ source_positions_(source_positions),
+ type_cache_(TypeCache::Get()) {
}
void Run(SimplifiedLowering* lowering) {
// Run propagation phase to a fixpoint.
- TRACE(("--{Propagation phase}--\n"));
+ TRACE("--{Propagation phase}--\n");
phase_ = PROPAGATE;
- Enqueue(jsgraph_->graph()->end());
+ EnqueueInitial(jsgraph_->graph()->end());
// Process nodes from the queue until it is empty.
while (!queue_.empty()) {
Node* node = queue_.front();
NodeInfo* info = GetInfo(node);
queue_.pop();
- info->queued = false;
- TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
- VisitNode(node, info->use, NULL);
- TRACE((" ==> output "));
- PrintInfo(info->output);
- TRACE(("\n"));
+ info->set_queued(false);
+ TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
+ VisitNode(node, info->truncation(), nullptr);
+ TRACE(" ==> output ");
+ PrintOutputInfo(info);
+ TRACE("\n");
}
// Run lowering and change insertion phase.
- TRACE(("--{Simplified lowering phase}--\n"));
+ TRACE("--{Simplified lowering phase}--\n");
phase_ = LOWER;
// Process nodes from the collected {nodes_} vector.
for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
Node* node = *i;
- TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
+ NodeInfo* info = GetInfo(node);
+ TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
// Reuse {VisitNode()} so the representation rules are in one place.
- VisitNode(node, GetUseInfo(node), lowering);
+ SourcePositionTable::Scope scope(
+ source_positions_, source_positions_->GetSourcePosition(node));
+ VisitNode(node, info->truncation(), lowering);
}
// Perform the final replacements.
@@ -120,108 +388,181 @@
Node* node = *i;
Node* replacement = *(++i);
node->ReplaceUses(replacement);
+ // We also need to replace the node in the rest of the vector.
+ for (NodeVector::iterator j = i + 1; j != replacements_.end(); ++j) {
+ ++j;
+ if (*j == node) *j = replacement;
+ }
}
}
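
Note on the loop above: {replacements_} is a flat vector of
(node, replacement) pairs, so the outer loop consumes two entries per
iteration, and the stride-two inner loop visits only the node slots of
later pairs, redirecting any pair that still names the node just replaced
so that replacement chains resolve correctly.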
- // Enqueue {node} if the {use} contains new information for that node.
- // Add {node} to {nodes_} if this is the first time it's been visited.
- void Enqueue(Node* node, MachineTypeUnion use = 0) {
+ void EnqueueInitial(Node* node) {
+ NodeInfo* info = GetInfo(node);
+ info->set_visited();
+ info->set_queued(true);
+ nodes_.push_back(node);
+ queue_.push(node);
+ }
+
+ // Enqueue {use_node}'s {index} input if {use_info} contains new information
+ // for that input node. Add the input to {nodes_} if this is the first time
+ // it's been visited.
+ void EnqueueInput(Node* use_node, int index,
+ UseInfo use_info = UseInfo::None()) {
+ Node* node = use_node->InputAt(index);
if (phase_ != PROPAGATE) return;
NodeInfo* info = GetInfo(node);
- if (!info->visited) {
+#ifdef DEBUG
+ // Check monotonicity of input requirements.
+ node_input_use_infos_[use_node->id()].SetAndCheckInput(use_node, index,
+ use_info);
+#endif // DEBUG
+ if (!info->visited()) {
// First visit of this node.
- info->visited = true;
- info->queued = true;
+ info->set_visited();
+ info->set_queued(true);
nodes_.push_back(node);
queue_.push(node);
- TRACE((" initial: "));
- info->use |= use;
- PrintUseInfo(node);
+ TRACE(" initial: ");
+ info->AddUse(use_info);
+ PrintTruncation(info->truncation());
return;
}
- TRACE((" queue?: "));
- PrintUseInfo(node);
- if ((info->use & use) != use) {
+ TRACE(" queue?: ");
+ PrintTruncation(info->truncation());
+ if (info->AddUse(use_info)) {
// New usage information for the node is available.
- if (!info->queued) {
+ if (!info->queued()) {
queue_.push(node);
- info->queued = true;
- TRACE((" added: "));
+ info->set_queued(true);
+ TRACE(" added: ");
} else {
- TRACE((" inqueue: "));
+ TRACE(" inqueue: ");
}
- info->use |= use;
- PrintUseInfo(node);
+ PrintTruncation(info->truncation());
}
}
bool lower() { return phase_ == LOWER; }
- void Enqueue(Node* node, MachineType use) {
- Enqueue(node, static_cast<MachineTypeUnion>(use));
- }
-
- void SetOutput(Node* node, MachineTypeUnion output) {
- // Every node should have at most one output representation. Note that
- // phis can have 0, if they have not been used in a representation-inducing
- // instruction.
- DCHECK((output & kRepMask) == 0 ||
- base::bits::IsPowerOfTwo32(output & kRepMask));
- GetInfo(node)->output = output;
- }
-
- bool BothInputsAre(Node* node, Type* type) {
- DCHECK_EQ(2, node->InputCount());
- return NodeProperties::GetBounds(node->InputAt(0)).upper->Is(type) &&
- NodeProperties::GetBounds(node->InputAt(1)).upper->Is(type);
- }
-
- void ProcessTruncateWord32Input(Node* node, int index, MachineTypeUnion use) {
- Node* input = node->InputAt(index);
- if (phase_ == PROPAGATE) {
- // In the propagate phase, propagate the usage information backward.
- Enqueue(input, use);
- } else {
- // In the change phase, insert a change before the use if necessary.
- MachineTypeUnion output = GetInfo(input)->output;
- if ((output & (kRepBit | kRepWord8 | kRepWord16 | kRepWord32)) == 0) {
- // Output representation doesn't match usage.
- TRACE((" truncate-to-int32: #%d:%s(@%d #%d:%s) ", node->id(),
- node->op()->mnemonic(), index, input->id(),
- input->op()->mnemonic()));
- TRACE((" from "));
- PrintInfo(output);
- TRACE((" to "));
- PrintInfo(use);
- TRACE(("\n"));
- Node* n = changer_->GetTruncatedWord32For(input, output);
- node->ReplaceInput(index, n);
+ void EnqueueUses(Node* node) {
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsValueEdge(edge)) {
+ Node* const user = edge.from();
+ if (user->id() < count_) {
+ // New type information for the node is available.
+ NodeInfo* info = GetInfo(user);
+ // Enqueue the node only if we are sure it is reachable from
+ // the end and it has not been queued yet.
+ if (info->visited() && !info->queued()) {
+ queue_.push(user);
+ info->set_queued(true);
+ }
+ }
}
}
}
- void ProcessInput(Node* node, int index, MachineTypeUnion use) {
+ void SetOutputFromMachineType(Node* node, MachineType machine_type) {
+ Type* type = Type::None();
+ switch (machine_type.semantic()) {
+ case MachineSemantic::kNone:
+ type = Type::None();
+ break;
+ case MachineSemantic::kBool:
+ type = Type::Boolean();
+ break;
+ case MachineSemantic::kInt32:
+ type = Type::Signed32();
+ break;
+ case MachineSemantic::kUint32:
+ type = Type::Unsigned32();
+ break;
+ case MachineSemantic::kInt64:
+ // TODO(jarin) Fix once we have proper int64.
+ type = Type::Internal();
+ break;
+ case MachineSemantic::kUint64:
+ // TODO(jarin) Fix once we have proper uint64.
+ type = Type::Internal();
+ break;
+ case MachineSemantic::kNumber:
+ type = Type::Number();
+ break;
+ case MachineSemantic::kAny:
+ type = Type::Any();
+ break;
+ }
+ return SetOutput(node, NodeOutputInfo(machine_type.representation(), type));
+ }
+
+ void SetOutput(Node* node, NodeOutputInfo output_info) {
+ // Every node should have at most one output representation. Note that
+ // phis can have 0, if they have not been used in a representation-inducing
+ // instruction.
+ Type* output_type = output_info.type();
+ if (NodeProperties::IsTyped(node)) {
+ output_type = Type::Intersect(NodeProperties::GetType(node),
+ output_info.type(), jsgraph_->zone());
+ }
+ NodeInfo* info = GetInfo(node);
+ DCHECK(info->output_type()->Is(output_type));
+ DCHECK(MachineRepresentationIsSubtype(info->representation(),
+ output_info.representation()));
+ if (!output_type->Is(info->output_type()) ||
+ output_info.representation() != info->representation()) {
+ EnqueueUses(node);
+ }
+ info->set_output_type(
+ NodeOutputInfo(output_info.representation(), output_type));
+ }
+
+ bool BothInputsAreSigned32(Node* node) {
+ DCHECK_EQ(2, node->InputCount());
+ return GetInfo(node->InputAt(0))->output_type()->Is(Type::Signed32()) &&
+ GetInfo(node->InputAt(1))->output_type()->Is(Type::Signed32());
+ }
+
+ bool BothInputsAreUnsigned32(Node* node) {
+ DCHECK_EQ(2, node->InputCount());
+ return GetInfo(node->InputAt(0))->output_type()->Is(Type::Unsigned32()) &&
+ GetInfo(node->InputAt(1))->output_type()->Is(Type::Unsigned32());
+ }
+
+ bool BothInputsAre(Node* node, Type* type) {
+ DCHECK_EQ(2, node->InputCount());
+ return GetInfo(node->InputAt(0))->output_type()->Is(type) &&
+ GetInfo(node->InputAt(1))->output_type()->Is(type);
+ }
+
+ void ConvertInput(Node* node, int index, UseInfo use) {
Node* input = node->InputAt(index);
+ // In the change phase, insert a change before the use if necessary.
+ if (use.preferred() == MachineRepresentation::kNone)
+ return; // No input requirement on the use.
+ NodeInfo* input_info = GetInfo(input);
+ MachineRepresentation input_rep = input_info->representation();
+ if (input_rep != use.preferred()) {
+ // Output representation doesn't match usage.
+ TRACE(" change: #%d:%s(@%d #%d:%s) ", node->id(), node->op()->mnemonic(),
+ index, input->id(), input->op()->mnemonic());
+ TRACE(" from ");
+ PrintOutputInfo(input_info);
+ TRACE(" to ");
+ PrintUseInfo(use);
+ TRACE("\n");
+ Node* n = changer_->GetRepresentationFor(
+ input, input_info->representation(), input_info->output_type(),
+ use.preferred(), use.truncation());
+ node->ReplaceInput(index, n);
+ }
+ }
+
+ void ProcessInput(Node* node, int index, UseInfo use) {
if (phase_ == PROPAGATE) {
- // In the propagate phase, propagate the usage information backward.
- Enqueue(input, use);
+ EnqueueInput(node, index, use);
} else {
- // In the change phase, insert a change before the use if necessary.
- if ((use & kRepMask) == 0) return; // No input requirement on the use.
- MachineTypeUnion output = GetInfo(input)->output;
- if ((output & kRepMask & use) == 0) {
- // Output representation doesn't match usage.
- TRACE((" change: #%d:%s(@%d #%d:%s) ", node->id(),
- node->op()->mnemonic(), index, input->id(),
- input->op()->mnemonic()));
- TRACE((" from "));
- PrintInfo(output);
- TRACE((" to "));
- PrintInfo(use);
- TRACE(("\n"));
- Node* n = changer_->GetRepresentationFor(input, output, use);
- node->ReplaceInput(index, n);
- }
+ ConvertInput(node, index, use);
}
}
@@ -230,175 +571,246 @@
DCHECK_GE(index, NodeProperties::PastContextIndex(node));
for (int i = std::max(index, NodeProperties::FirstEffectIndex(node));
i < NodeProperties::PastEffectIndex(node); ++i) {
- Enqueue(node->InputAt(i)); // Effect inputs: just visit
+ EnqueueInput(node, i); // Effect inputs: just visit
}
for (int i = std::max(index, NodeProperties::FirstControlIndex(node));
i < NodeProperties::PastControlIndex(node); ++i) {
- Enqueue(node->InputAt(i)); // Control inputs: just visit
+ EnqueueInput(node, i); // Control inputs: just visit
}
}
// The default, most general visitation case. For {node}, process all value,
- // context, effect, and control inputs, assuming that value inputs should have
- // {kRepTagged} representation and can observe all output values {kTypeAny}.
+ // context, frame state, effect, and control inputs, assuming that value
+ // inputs should have a tagged representation and can observe any output
+ // value.
void VisitInputs(Node* node) {
- auto i = node->input_edges().begin();
- for (int j = node->op()->ValueInputCount(); j > 0; ++i, j--) {
- ProcessInput(node, (*i).index(), kMachAnyTagged); // Value inputs
+ int tagged_count = node->op()->ValueInputCount() +
+ OperatorProperties::GetContextInputCount(node->op());
+ // Visit value and context inputs as tagged.
+ for (int i = 0; i < tagged_count; i++) {
+ ProcessInput(node, i, UseInfo::AnyTagged());
}
- for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
- ++i, j--) {
- ProcessInput(node, (*i).index(), kMachAnyTagged); // Context inputs
+ // Only enqueue other inputs (framestates, effects, control).
+ for (int i = tagged_count; i < node->InputCount(); i++) {
+ EnqueueInput(node, i);
}
- for (int j = node->op()->EffectInputCount(); j > 0; ++i, j--) {
- Enqueue((*i).to()); // Effect inputs: just visit
- }
- for (int j = node->op()->ControlInputCount(); j > 0; ++i, j--) {
- Enqueue((*i).to()); // Control inputs: just visit
- }
- SetOutput(node, kMachAnyTagged);
}
- // Helper for binops of the I x I -> O variety.
- void VisitBinop(Node* node, MachineTypeUnion input_use,
- MachineTypeUnion output) {
- DCHECK_EQ(2, node->InputCount());
- ProcessInput(node, 0, input_use);
- ProcessInput(node, 1, input_use);
+ // Helper for binops of the R x L -> O variety.
+ void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
+ NodeOutputInfo output) {
+ DCHECK_EQ(2, node->op()->ValueInputCount());
+ ProcessInput(node, 0, left_use);
+ ProcessInput(node, 1, right_use);
+ for (int i = 2; i < node->InputCount(); i++) {
+ EnqueueInput(node, i);
+ }
SetOutput(node, output);
}
+ // Helper for binops of the I x I -> O variety.
+ void VisitBinop(Node* node, UseInfo input_use, NodeOutputInfo output) {
+ VisitBinop(node, input_use, input_use, output);
+ }
+
// Helper for unops of the I -> O variety.
- void VisitUnop(Node* node, MachineTypeUnion input_use,
- MachineTypeUnion output) {
+ void VisitUnop(Node* node, UseInfo input_use, NodeOutputInfo output) {
DCHECK_EQ(1, node->InputCount());
ProcessInput(node, 0, input_use);
SetOutput(node, output);
}
// Helper for leaf nodes.
- void VisitLeaf(Node* node, MachineTypeUnion output) {
+ void VisitLeaf(Node* node, NodeOutputInfo output) {
DCHECK_EQ(0, node->InputCount());
SetOutput(node, output);
}
// Helpers for specific types of binops.
void VisitFloat64Binop(Node* node) {
- VisitBinop(node, kMachFloat64, kMachFloat64);
+ VisitBinop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
}
- void VisitInt32Binop(Node* node) { VisitBinop(node, kMachInt32, kMachInt32); }
+ void VisitInt32Binop(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ }
+ void VisitWord32TruncatingBinop(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::NumberTruncatedToWord32());
+ }
void VisitUint32Binop(Node* node) {
- VisitBinop(node, kMachUint32, kMachUint32);
+ VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
}
- void VisitInt64Binop(Node* node) { VisitBinop(node, kMachInt64, kMachInt64); }
+ void VisitInt64Binop(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Int64());
+ }
void VisitUint64Binop(Node* node) {
- VisitBinop(node, kMachUint64, kMachUint64);
+ VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Uint64());
}
- void VisitFloat64Cmp(Node* node) { VisitBinop(node, kMachFloat64, kRepBit); }
- void VisitInt32Cmp(Node* node) { VisitBinop(node, kMachInt32, kRepBit); }
- void VisitUint32Cmp(Node* node) { VisitBinop(node, kMachUint32, kRepBit); }
- void VisitInt64Cmp(Node* node) { VisitBinop(node, kMachInt64, kRepBit); }
- void VisitUint64Cmp(Node* node) { VisitBinop(node, kMachUint64, kRepBit); }
+ void VisitFloat64Cmp(Node* node) {
+ VisitBinop(node, UseInfo::Float64(), NodeOutputInfo::Bool());
+ }
+ void VisitInt32Cmp(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Bool());
+ }
+ void VisitUint32Cmp(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Bool());
+ }
+ void VisitInt64Cmp(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Bool());
+ }
+ void VisitUint64Cmp(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Bool());
+ }
// Infer representation for phi-like nodes.
- MachineType GetRepresentationForPhi(Node* node, MachineTypeUnion use) {
- // Phis adapt to the output representation their uses demand.
- Type* upper = NodeProperties::GetBounds(node).upper;
- if ((use & kRepMask) == kRepTagged) {
- // only tagged uses.
- return kRepTagged;
- } else if (upper->Is(Type::Integral32())) {
- // Integer within [-2^31, 2^32[ range.
- if ((use & kRepMask) == kRepFloat64) {
- // only float64 uses.
- return kRepFloat64;
- } else if (upper->Is(Type::Signed32()) || upper->Is(Type::Unsigned32())) {
- // multiple uses, but we are within 32 bits range => pick kRepWord32.
- return kRepWord32;
- } else if ((use & kRepMask) == kRepWord32 ||
- (use & kTypeMask) == kTypeInt32 ||
- (use & kTypeMask) == kTypeUint32) {
- // We only use 32 bits or we use the result consistently.
- return kRepWord32;
- } else {
- return kRepFloat64;
- }
- } else if (IsSafeBitOperand(node)) {
- // multiple uses => pick kRepBit.
- return kRepBit;
- } else if (upper->Is(Type::Number())) {
- // multiple uses => pick kRepFloat64.
- return kRepFloat64;
+ NodeOutputInfo GetOutputInfoForPhi(Node* node, Truncation use) {
+ // Compute the type.
+ Type* type = GetInfo(node->InputAt(0))->output_type();
+ for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
+ type = Type::Union(type, GetInfo(node->InputAt(i))->output_type(),
+ jsgraph_->zone());
}
- return kRepTagged;
+
+ // Compute the representation.
+ MachineRepresentation rep = MachineRepresentation::kTagged;
+ if (type->Is(Type::None())) {
+ rep = MachineRepresentation::kNone;
+ } else if (type->Is(Type::Signed32()) || type->Is(Type::Unsigned32())) {
+ rep = MachineRepresentation::kWord32;
+ } else if (use.TruncatesToWord32()) {
+ rep = MachineRepresentation::kWord32;
+ } else if (type->Is(Type::Boolean())) {
+ rep = MachineRepresentation::kBit;
+ } else if (type->Is(Type::Number())) {
+ rep = MachineRepresentation::kFloat64;
+ } else if (type->Is(Type::Internal())) {
+ // We mark (u)int64 as Type::Internal.
+ // TODO(jarin) This is a workaround for our lack of (u)int64
+ // types. This can be removed once we can represent (u)int64
+ // unambiguously. (At the moment internal objects, such as the hole,
+ // are also Type::Internal()).
+ bool is_word64 = GetInfo(node->InputAt(0))->representation() ==
+ MachineRepresentation::kWord64;
+#ifdef DEBUG
+ // Check that all the inputs agree on being Word64.
+ for (int i = 1; i < node->op()->ValueInputCount(); i++) {
+ DCHECK_EQ(is_word64, GetInfo(node->InputAt(i))->representation() ==
+ MachineRepresentation::kWord64);
+ }
+#endif
+ rep = is_word64 ? MachineRepresentation::kWord64
+ : MachineRepresentation::kTagged;
+ }
+ return NodeOutputInfo(rep, type);
}
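
Worked example (not part of the diff): a phi merging a Signed32 input with
an Unsigned32 input gets the union of the two types. That union passes
neither 32-bit type test, so the decision falls to the use: the phi becomes
kWord32 if the use truncates to 32 bits, and otherwise kFloat64 via the
Type::Number() check.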
// Helper for handling selects.
- void VisitSelect(Node* node, MachineTypeUnion use,
+ void VisitSelect(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- ProcessInput(node, 0, kRepBit);
- MachineType output = GetRepresentationForPhi(node, use);
+ ProcessInput(node, 0, UseInfo::Bool());
- Type* upper = NodeProperties::GetBounds(node).upper;
- MachineType output_type =
- static_cast<MachineType>(changer_->TypeFromUpperBound(upper) | output);
- SetOutput(node, output_type);
+ NodeOutputInfo output = GetOutputInfoForPhi(node, truncation);
+ SetOutput(node, output);
if (lower()) {
// Update the select operator.
SelectParameters p = SelectParametersOf(node->op());
- MachineType type = static_cast<MachineType>(output_type);
- if (type != p.type()) {
- node->set_op(lowering->common()->Select(type, p.hint()));
+ if (output.representation() != p.representation()) {
+ NodeProperties::ChangeOp(node, lowering->common()->Select(
+ output.representation(), p.hint()));
}
-
- // Convert inputs to the output representation of this select.
- ProcessInput(node, 1, output_type);
- ProcessInput(node, 2, output_type);
- } else {
- // Propagate {use} of the select to value inputs.
- MachineType use_type =
- static_cast<MachineType>((use & kTypeMask) | output);
- ProcessInput(node, 1, use_type);
- ProcessInput(node, 2, use_type);
}
+ // Convert inputs to the output representation of this select and pass
+ // the truncation along.
+ UseInfo input_use(output.representation(), truncation);
+ ProcessInput(node, 1, input_use);
+ ProcessInput(node, 2, input_use);
}
// Helper for handling phis.
- void VisitPhi(Node* node, MachineTypeUnion use,
+ void VisitPhi(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- MachineType output = GetRepresentationForPhi(node, use);
-
- Type* upper = NodeProperties::GetBounds(node).upper;
- MachineType output_type =
- static_cast<MachineType>(changer_->TypeFromUpperBound(upper) | output);
- SetOutput(node, output_type);
+ NodeOutputInfo output = GetOutputInfoForPhi(node, truncation);
+ SetOutput(node, output);
int values = node->op()->ValueInputCount();
-
if (lower()) {
// Update the phi operator.
- MachineType type = static_cast<MachineType>(output_type);
- if (type != OpParameter<MachineType>(node)) {
- node->set_op(lowering->common()->Phi(type, values));
- }
-
- // Convert inputs to the output representation of this phi.
- for (Edge const edge : node->input_edges()) {
- // TODO(titzer): it'd be nice to have distinguished edge kinds here.
- ProcessInput(node, edge.index(), values > 0 ? output_type : 0);
- values--;
- }
- } else {
- // Propagate {use} of the phi to value inputs, and 0 to control.
- MachineType use_type =
- static_cast<MachineType>((use & kTypeMask) | output);
- for (Edge const edge : node->input_edges()) {
- // TODO(titzer): it'd be nice to have distinguished edge kinds here.
- ProcessInput(node, edge.index(), values > 0 ? use_type : 0);
- values--;
+ if (output.representation() != PhiRepresentationOf(node->op())) {
+ NodeProperties::ChangeOp(
+ node, lowering->common()->Phi(output.representation(), values));
}
}
+
+ // Convert inputs to the output representation of this phi and pass the
+ // truncation along.
+ UseInfo input_use(output.representation(), truncation);
+ for (int i = 0; i < node->InputCount(); i++) {
+ ProcessInput(node, i, i < values ? input_use : UseInfo::None());
+ }
+ }
+
+ void VisitCall(Node* node, SimplifiedLowering* lowering) {
+ const CallDescriptor* desc = OpParameter<const CallDescriptor*>(node->op());
+ const MachineSignature* sig = desc->GetMachineSignature();
+ int params = static_cast<int>(sig->parameter_count());
+ // Propagate representation information from call descriptor.
+ for (int i = 0; i < node->InputCount(); i++) {
+ if (i == 0) {
+ // The target of the call.
+ ProcessInput(node, i, UseInfo::None());
+ } else if ((i - 1) < params) {
+ ProcessInput(node, i, TruncatingUseInfoFromRepresentation(
+ sig->GetParam(i - 1).representation()));
+ } else {
+ ProcessInput(node, i, UseInfo::None());
+ }
+ }
+
+ if (sig->return_count() > 0) {
+ SetOutputFromMachineType(node, desc->GetMachineSignature()->GetReturn());
+ } else {
+ SetOutput(node, NodeOutputInfo::AnyTagged());
+ }
+ }
+
+ MachineSemantic DeoptValueSemanticOf(Type* type) {
+ CHECK(!type->Is(Type::None()));
+ // We only need signedness to do deopt correctly.
+ if (type->Is(Type::Signed32())) {
+ return MachineSemantic::kInt32;
+ } else if (type->Is(Type::Unsigned32())) {
+ return MachineSemantic::kUint32;
+ } else {
+ return MachineSemantic::kAny;
+ }
+ }
+
+ void VisitStateValues(Node* node) {
+ if (phase_ == PROPAGATE) {
+ for (int i = 0; i < node->InputCount(); i++) {
+ EnqueueInput(node, i, UseInfo::Any());
+ }
+ } else {
+ Zone* zone = jsgraph_->zone();
+ ZoneVector<MachineType>* types =
+ new (zone->New(sizeof(ZoneVector<MachineType>)))
+ ZoneVector<MachineType>(node->InputCount(), zone);
+ for (int i = 0; i < node->InputCount(); i++) {
+ NodeInfo* input_info = GetInfo(node->InputAt(i));
+ MachineType machine_type(
+ input_info->representation(),
+ DeoptValueSemanticOf(input_info->output_type()));
+ DCHECK(machine_type.representation() !=
+ MachineRepresentation::kWord32 ||
+ machine_type.semantic() == MachineSemantic::kInt32 ||
+ machine_type.semantic() == MachineSemantic::kUint32);
+ (*types)[i] = machine_type;
+ }
+ NodeProperties::ChangeOp(node,
+ jsgraph_->common()->TypedStateValues(types));
+ }
+ SetOutput(node, NodeOutputInfo::AnyTagged());
}
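
Note on the allocation above: the ZoneVector is placement-new'ed into zone
storage because the TypedStateValues operator created here retains the
pointer; allocating it in the graph's zone ties its lifetime to the graph
rather than to this visitor.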
const Operator* Int32Op(Node* node) {
@@ -413,60 +825,9 @@
return changer_->Float64OperatorFor(node->opcode());
}
- bool CanLowerToInt32Binop(Node* node, MachineTypeUnion use) {
- return BothInputsAre(node, Type::Signed32()) && !CanObserveNonInt32(use);
- }
-
- bool IsSafeBitOperand(Node* node) {
- Type* type = NodeProperties::GetBounds(node).upper;
- return type->Is(safe_bit_range_);
- }
-
- bool IsSafeIntAdditiveOperand(Node* node) {
- Type* type = NodeProperties::GetBounds(node).upper;
- // TODO(jarin): Unfortunately, bitset types are not subtypes of larger
- // range types, so we have to explicitly check for Integral32 here
- // (in addition to the safe integer range). Once we fix subtyping for
- // ranges, we should simplify this.
- return type->Is(safe_int_additive_range_) || type->Is(Type::Integral32());
- }
-
- bool CanLowerToInt32AdditiveBinop(Node* node, MachineTypeUnion use) {
- return IsSafeIntAdditiveOperand(node->InputAt(0)) &&
- IsSafeIntAdditiveOperand(node->InputAt(1)) &&
- !CanObserveNonInt32(use);
- }
-
- bool CanLowerToUint32Binop(Node* node, MachineTypeUnion use) {
- return BothInputsAre(node, Type::Unsigned32()) && !CanObserveNonUint32(use);
- }
-
- bool CanLowerToUint32AdditiveBinop(Node* node, MachineTypeUnion use) {
- return IsSafeIntAdditiveOperand(node->InputAt(0)) &&
- IsSafeIntAdditiveOperand(node->InputAt(1)) &&
- !CanObserveNonUint32(use);
- }
-
- bool CanObserveNonInt32(MachineTypeUnion use) {
- return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
- }
-
- bool CanObserveMinusZero(MachineTypeUnion use) {
- // TODO(turbofan): technically Uint32 cannot observe minus zero either.
- return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
- }
-
- bool CanObserveNaN(MachineTypeUnion use) {
- return (use & (kTypeNumber | kTypeAny)) != 0;
- }
-
- bool CanObserveNonUint32(MachineTypeUnion use) {
- return (use & (kTypeInt32 | kTypeNumber | kTypeAny)) != 0;
- }
-
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
- void VisitNode(Node* node, MachineTypeUnion use,
+ void VisitNode(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
switch (node->opcode()) {
//------------------------------------------------------------------
@@ -474,43 +835,43 @@
//------------------------------------------------------------------
case IrOpcode::kStart:
case IrOpcode::kDead:
- return VisitLeaf(node, 0);
+ return VisitLeaf(node, NodeOutputInfo::None());
case IrOpcode::kParameter: {
// TODO(titzer): use representation from linkage.
- Type* upper = NodeProperties::GetBounds(node).upper;
- ProcessInput(node, 0, 0);
- SetOutput(node, kRepTagged | changer_->TypeFromUpperBound(upper));
+ Type* type = NodeProperties::GetType(node);
+ ProcessInput(node, 0, UseInfo::None());
+ SetOutput(node, NodeOutputInfo(MachineRepresentation::kTagged, type));
return;
}
case IrOpcode::kInt32Constant:
- return VisitLeaf(node, kRepWord32);
+ return VisitLeaf(node, NodeOutputInfo::Int32());
case IrOpcode::kInt64Constant:
- return VisitLeaf(node, kRepWord64);
+ return VisitLeaf(node, NodeOutputInfo::Int64());
+ case IrOpcode::kFloat32Constant:
+ return VisitLeaf(node, NodeOutputInfo::Float32());
case IrOpcode::kFloat64Constant:
- return VisitLeaf(node, kRepFloat64);
+ return VisitLeaf(node, NodeOutputInfo::Float64());
case IrOpcode::kExternalConstant:
- return VisitLeaf(node, kMachPtr);
+ return VisitLeaf(node, NodeOutputInfo::Pointer());
case IrOpcode::kNumberConstant:
- return VisitLeaf(node, kRepTagged);
+ return VisitLeaf(node, NodeOutputInfo::NumberTagged());
case IrOpcode::kHeapConstant:
- return VisitLeaf(node, kRepTagged);
-
- case IrOpcode::kEnd:
- case IrOpcode::kIfTrue:
- case IrOpcode::kIfFalse:
- case IrOpcode::kReturn:
- case IrOpcode::kMerge:
- case IrOpcode::kThrow:
- return VisitInputs(node); // default visit for all node inputs.
+ return VisitLeaf(node, NodeOutputInfo::AnyTagged());
case IrOpcode::kBranch:
- ProcessInput(node, 0, kRepBit);
- Enqueue(NodeProperties::GetControlInput(node, 0));
+ ProcessInput(node, 0, UseInfo::Bool());
+ EnqueueInput(node, NodeProperties::FirstControlIndex(node));
+ break;
+ case IrOpcode::kSwitch:
+ ProcessInput(node, 0, UseInfo::TruncatingWord32());
+ EnqueueInput(node, NodeProperties::FirstControlIndex(node));
break;
case IrOpcode::kSelect:
- return VisitSelect(node, use, lowering);
+ return VisitSelect(node, truncation, lowering);
case IrOpcode::kPhi:
- return VisitPhi(node, use, lowering);
+ return VisitPhi(node, truncation, lowering);
+ case IrOpcode::kCall:
+ return VisitCall(node, lowering);
//------------------------------------------------------------------
// JavaScript operators.
@@ -524,67 +885,45 @@
JS_OP_LIST(DEFINE_JS_CASE)
#undef DEFINE_JS_CASE
VisitInputs(node);
- return SetOutput(node, kRepTagged);
+ return SetOutput(node, NodeOutputInfo::AnyTagged());
//------------------------------------------------------------------
// Simplified operators.
//------------------------------------------------------------------
- case IrOpcode::kAnyToBoolean: {
- if (IsSafeBitOperand(node->InputAt(0))) {
- VisitUnop(node, kRepBit, kRepBit);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- VisitUnop(node, kMachAnyTagged, kTypeBool | kRepTagged);
- if (lower()) {
- // AnyToBoolean(x) => Call(ToBooleanStub, x, no-context)
- Operator::Properties properties = node->op()->properties();
- Callable callable = CodeFactory::ToBoolean(
- jsgraph_->isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
- CallDescriptor::Flags flags = CallDescriptor::kPatchableCallSite;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- callable.descriptor(), 0, flags, properties, jsgraph_->zone());
- node->set_op(jsgraph_->common()->Call(desc));
- node->InsertInput(jsgraph_->zone(), 0,
- jsgraph_->HeapConstant(callable.code()));
- node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
- }
- }
- break;
- }
case IrOpcode::kBooleanNot: {
if (lower()) {
- MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
- if (input & kRepBit) {
+ NodeInfo* input_info = GetInfo(node->InputAt(0));
+ if (input_info->representation() == MachineRepresentation::kBit) {
// BooleanNot(x: kRepBit) => Word32Equal(x, #0)
- node->set_op(lowering->machine()->Word32Equal());
node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
+ NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
} else {
// BooleanNot(x: kRepTagged) => WordEqual(x, #false)
- node->set_op(lowering->machine()->WordEqual());
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
+ NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
}
} else {
// No input representation requirement; adapt during lowering.
- ProcessInput(node, 0, kTypeBool);
- SetOutput(node, kRepBit);
+ ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
+ SetOutput(node, NodeOutputInfo::Bool());
}
break;
}
case IrOpcode::kBooleanToNumber: {
if (lower()) {
- MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
- if (input & kRepBit) {
+ NodeInfo* input_info = GetInfo(node->InputAt(0));
+ if (input_info->representation() == MachineRepresentation::kBit) {
// BooleanToNumber(x: kRepBit) => x
DeferReplacement(node, node->InputAt(0));
} else {
// BooleanToNumber(x: kRepTagged) => WordEqual(x, #true)
- node->set_op(lowering->machine()->WordEqual());
node->AppendInput(jsgraph_->zone(), jsgraph_->TrueConstant());
+ NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
}
} else {
// No input representation requirement; adapt during lowering.
- ProcessInput(node, 0, kTypeBool);
- SetOutput(node, kMachInt32);
+ ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
+ SetOutput(node, NodeOutputInfo::Int32());
}
break;
}
@@ -592,288 +931,319 @@
case IrOpcode::kNumberLessThan:
case IrOpcode::kNumberLessThanOrEqual: {
// Number comparisons reduce to integer comparisons for integer inputs.
- if (BothInputsAre(node, Type::Signed32())) {
+ if (BothInputsAreSigned32(node)) {
// => signed Int32Cmp
VisitInt32Cmp(node);
- if (lower()) node->set_op(Int32Op(node));
- } else if (BothInputsAre(node, Type::Unsigned32())) {
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ } else if (BothInputsAreUnsigned32(node)) {
// => unsigned Int32Cmp
VisitUint32Cmp(node);
- if (lower()) node->set_op(Uint32Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
} else {
// => Float64Cmp
VisitFloat64Cmp(node);
- if (lower()) node->set_op(Float64Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
}
break;
}
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract: {
- // Add and subtract reduce to Int32Add/Sub if the inputs
- // are already integers and all uses are truncating.
- if (CanLowerToInt32Binop(node, use)) {
+ if (BothInputsAre(node, Type::Signed32()) &&
+ NodeProperties::GetType(node)->Is(Type::Signed32())) {
+ // int32 + int32 = int32
// => signed Int32Add/Sub
VisitInt32Binop(node);
- if (lower()) node->set_op(Int32Op(node));
- } else if (CanLowerToInt32AdditiveBinop(node, use)) {
- // => signed Int32Add/Sub, truncating inputs
- ProcessTruncateWord32Input(node, 0, kTypeInt32);
- ProcessTruncateWord32Input(node, 1, kTypeInt32);
- SetOutput(node, kMachInt32);
- if (lower()) node->set_op(Int32Op(node));
- } else if (CanLowerToUint32Binop(node, use)) {
- // => unsigned Int32Add/Sub
- VisitUint32Binop(node);
- if (lower()) node->set_op(Uint32Op(node));
- } else if (CanLowerToUint32AdditiveBinop(node, use)) {
- // => signed Int32Add/Sub, truncating inputs
- ProcessTruncateWord32Input(node, 0, kTypeUint32);
- ProcessTruncateWord32Input(node, 1, kTypeUint32);
- SetOutput(node, kMachUint32);
- if (lower()) node->set_op(Uint32Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ } else if (BothInputsAre(node, type_cache_.kAdditiveSafeInteger) &&
+ truncation.TruncatesToWord32()) {
+ // safe-int + safe-int = x (truncated to int32)
+ // => signed Int32Add/Sub (truncated)
+ VisitWord32TruncatingBinop(node);
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
} else {
// => Float64Add/Sub
VisitFloat64Binop(node);
- if (lower()) node->set_op(Float64Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
}
break;
}
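
Note on the safe-integer case above: if both inputs lie in the additive
safe integer range [-2^52, 2^52], their float64 sum or difference is
exact, so truncating that exact result to 32 bits agrees with Int32Add or
Int32Sub applied to the truncated inputs (both are arithmetic modulo
2^32); this is why the lowering also demands truncating uses.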
case IrOpcode::kNumberMultiply: {
- NumberMatcher right(node->InputAt(1));
- if (right.IsInRange(-1048576, 1048576)) { // must fit double mantissa.
- if (CanLowerToInt32Binop(node, use)) {
- // => signed Int32Mul
+ if (BothInputsAreSigned32(node)) {
+ if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
+ // Multiply reduces to Int32Mul if the inputs and the output
+ // are integers.
VisitInt32Binop(node);
- if (lower()) node->set_op(Int32Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ break;
+ }
+ if (truncation.TruncatesToWord32() &&
+ NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger)) {
+ // Multiply reduces to Int32Mul if the inputs are integers,
+ // the uses are truncating and the result is in the safe
+ // integer range.
+ VisitWord32TruncatingBinop(node);
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
break;
}
}
// => Float64Mul
VisitFloat64Binop(node);
- if (lower()) node->set_op(Float64Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
break;
}
case IrOpcode::kNumberDivide: {
- if (CanLowerToInt32Binop(node, use)) {
+ if (BothInputsAreSigned32(node)) {
+ if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
// => signed Int32Div
VisitInt32Binop(node);
if (lower()) DeferReplacement(node, lowering->Int32Div(node));
break;
+ }
+ if (truncation.TruncatesToWord32()) {
+ // => signed Int32Div
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ break;
+ }
}
- if (BothInputsAre(node, Type::Unsigned32()) && !CanObserveNaN(use)) {
+ if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
// => unsigned Uint32Div
- VisitUint32Binop(node);
+ VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
break;
}
// => Float64Div
VisitFloat64Binop(node);
- if (lower()) node->set_op(Float64Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
break;
}
case IrOpcode::kNumberModulus: {
- if (CanLowerToInt32Binop(node, use)) {
- // => signed Int32Mod
- VisitInt32Binop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
- break;
+ if (BothInputsAreSigned32(node)) {
+ if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
+ // => signed Int32Mod
+ VisitInt32Binop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ break;
+ }
+ if (truncation.TruncatesToWord32()) {
+ // => signed Int32Mod
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ break;
+ }
}
- if (BothInputsAre(node, Type::Unsigned32()) && !CanObserveNaN(use)) {
+ if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
// => unsigned Uint32Mod
- VisitUint32Binop(node);
+ VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
break;
}
// => Float64Mod
VisitFloat64Binop(node);
- if (lower()) node->set_op(Float64Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
break;
}
- case IrOpcode::kNumberToInt32: {
- MachineTypeUnion use_rep = use & kRepMask;
- Node* input = node->InputAt(0);
- Type* in_upper = NodeProperties::GetBounds(input).upper;
- MachineTypeUnion in = GetInfo(input)->output;
- if (in_upper->Is(Type::Signed32())) {
- // If the input has type int32, pass through representation.
- VisitUnop(node, kTypeInt32 | use_rep, kTypeInt32 | use_rep);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeUint32 ||
- in_upper->Is(Type::Unsigned32())) {
- // Just change representation if necessary.
- VisitUnop(node, kTypeUint32 | kRepWord32, kTypeInt32 | kRepWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeInt32 ||
- (in & kRepMask) == kRepWord32) {
- // Just change representation if necessary.
- VisitUnop(node, kTypeInt32 | kRepWord32, kTypeInt32 | kRepWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- // Require the input in float64 format and perform truncation.
- // TODO(turbofan): avoid a truncation with a smi check.
- VisitUnop(node, kTypeInt32 | kRepFloat64, kTypeInt32 | kRepWord32);
- if (lower())
- node->set_op(lowering->machine()->TruncateFloat64ToInt32());
+ case IrOpcode::kNumberBitwiseOr:
+ case IrOpcode::kNumberBitwiseXor:
+ case IrOpcode::kNumberBitwiseAnd: {
+ VisitInt32Binop(node);
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ break;
+ }
+ case IrOpcode::kNumberShiftLeft: {
+ Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
}
break;
}
+ case IrOpcode::kNumberShiftRight: {
+ Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
+ }
+ break;
+ }
+ case IrOpcode::kNumberShiftRightLogical: {
+ Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
+ }
+ break;
+ }
+ case IrOpcode::kNumberToInt32: {
+ // Just change representation if necessary.
+ VisitUnop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ break;
+ }
case IrOpcode::kNumberToUint32: {
- MachineTypeUnion use_rep = use & kRepMask;
- Node* input = node->InputAt(0);
- Type* in_upper = NodeProperties::GetBounds(input).upper;
- MachineTypeUnion in = GetInfo(input)->output;
- if (in_upper->Is(Type::Unsigned32())) {
- // If the input has type uint32, pass through representation.
- VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeUint32 ||
- in_upper->Is(Type::Unsigned32())) {
- // Just change representation if necessary.
- VisitUnop(node, kTypeUint32 | kRepWord32, kTypeUint32 | kRepWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeInt32 ||
- (in & kRepMask) == kRepWord32) {
- // Just change representation if necessary.
- VisitUnop(node, kTypeInt32 | kRepWord32, kTypeUint32 | kRepWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- // Require the input in float64 format and perform truncation.
- // TODO(turbofan): avoid a truncation with a smi check.
- VisitUnop(node, kTypeUint32 | kRepFloat64, kTypeUint32 | kRepWord32);
- if (lower())
- node->set_op(lowering->machine()->TruncateFloat64ToInt32());
+ // Just change representation if necessary.
+ VisitUnop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ break;
+ }
+ case IrOpcode::kNumberIsHoleNaN: {
+ VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Bool());
+ if (lower()) {
+ // NumberIsHoleNaN(x) => Word32Equal(Float64ExtractLowWord32(x),
+ // #HoleNaNLower32)
+ node->ReplaceInput(0,
+ jsgraph_->graph()->NewNode(
+ lowering->machine()->Float64ExtractLowWord32(),
+ node->InputAt(0)));
+ node->AppendInput(jsgraph_->zone(),
+ jsgraph_->Int32Constant(kHoleNanLower32));
+ NodeProperties::ChangeOp(node, jsgraph_->machine()->Word32Equal());
+ }
+ break;
+ }
+ case IrOpcode::kPlainPrimitiveToNumber: {
+ VisitUnop(node, UseInfo::AnyTagged(), NodeOutputInfo::NumberTagged());
+ if (lower()) {
+ // PlainPrimitiveToNumber(x) => Call(ToNumberStub, x, no-context)
+ Operator::Properties properties = node->op()->properties();
+ Callable callable = CodeFactory::ToNumber(jsgraph_->isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+ flags, properties);
+ node->InsertInput(jsgraph_->zone(), 0,
+ jsgraph_->HeapConstant(callable.code()));
+ node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+ NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
break;
}
case IrOpcode::kReferenceEqual: {
- VisitBinop(node, kMachAnyTagged, kRepBit);
- if (lower()) node->set_op(lowering->machine()->WordEqual());
+ VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
+ if (lower()) {
+ NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ }
break;
}
case IrOpcode::kStringEqual: {
- VisitBinop(node, kMachAnyTagged, kRepBit);
+ VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
if (lower()) lowering->DoStringEqual(node);
break;
}
case IrOpcode::kStringLessThan: {
- VisitBinop(node, kMachAnyTagged, kRepBit);
+ VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
if (lower()) lowering->DoStringLessThan(node);
break;
}
case IrOpcode::kStringLessThanOrEqual: {
- VisitBinop(node, kMachAnyTagged, kRepBit);
+ VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
if (lower()) lowering->DoStringLessThanOrEqual(node);
break;
}
- case IrOpcode::kStringAdd: {
- VisitBinop(node, kMachAnyTagged, kMachAnyTagged);
- if (lower()) lowering->DoStringAdd(node);
+ case IrOpcode::kAllocate: {
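+      // Allocate takes a single value input (the allocation size) and
+      // produces a tagged pointer to the newly allocated object.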
+ ProcessInput(node, 0, UseInfo::AnyTagged());
+ ProcessRemainingInputs(node, 1);
+ SetOutput(node, NodeOutputInfo::AnyTagged());
break;
}
case IrOpcode::kLoadField: {
FieldAccess access = FieldAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+ ProcessInput(node, 0, UseInfoForBasePointer(access));
ProcessRemainingInputs(node, 1);
- SetOutput(node, access.machine_type);
- if (lower()) lowering->DoLoadField(node);
+ SetOutputFromMachineType(node, access.machine_type);
break;
}
case IrOpcode::kStoreField: {
FieldAccess access = FieldAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access));
- ProcessInput(node, 1, access.machine_type);
+ ProcessInput(node, 0, UseInfoForBasePointer(access));
+ ProcessInput(node, 1, TruncatingUseInfoFromRepresentation(
+ access.machine_type.representation()));
ProcessRemainingInputs(node, 2);
- SetOutput(node, 0);
- if (lower()) lowering->DoStoreField(node);
+ SetOutput(node, NodeOutputInfo::None());
break;
}
case IrOpcode::kLoadBuffer: {
BufferAccess access = BufferAccessOf(node->op());
- ProcessInput(node, 0, kMachPtr); // buffer
- ProcessInput(node, 1, kMachInt32); // offset
- ProcessInput(node, 2, kMachInt32); // length
+ ProcessInput(node, 0, UseInfo::PointerInt()); // buffer
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // offset
+ ProcessInput(node, 2, UseInfo::TruncatingWord32()); // length
ProcessRemainingInputs(node, 3);
- // Tagged overrides everything if we have to do a typed array bounds
- // check, because we may need to return undefined then.
- MachineType output_type;
- if (use & kRepTagged) {
- output_type = kMachAnyTagged;
- } else if (use & kRepFloat64) {
- if (access.machine_type() & kRepFloat32) {
- output_type = access.machine_type();
+
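+      // The output representation depends on how far the uses truncate the
+      // potentially-undefined result of an out-of-bounds access.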
+ NodeOutputInfo output_info;
+ if (truncation.TruncatesUndefinedToZeroOrNaN()) {
+ if (truncation.TruncatesNaNToZero()) {
+ // If undefined is truncated to a non-NaN number, we can use
+ // the load's representation.
+ output_info = NodeOutputInfo(access.machine_type().representation(),
+ NodeProperties::GetType(node));
} else {
- output_type = kMachFloat64;
+ // If undefined is truncated to a number, but the use can
+ // observe NaN, we need to output at least the float32
+ // representation.
+ if (access.machine_type().representation() ==
+ MachineRepresentation::kFloat32) {
+ output_info =
+ NodeOutputInfo(access.machine_type().representation(),
+ NodeProperties::GetType(node));
+ } else {
+ output_info = NodeOutputInfo::Float64();
+ }
}
- } else if (use & kRepFloat32) {
- output_type = kMachFloat32;
} else {
- output_type = access.machine_type();
+ // If undefined is not truncated away, we need to have the tagged
+ // representation.
+ output_info = NodeOutputInfo::AnyTagged();
}
- SetOutput(node, output_type);
- if (lower()) lowering->DoLoadBuffer(node, output_type, changer_);
+ SetOutput(node, output_info);
+ if (lower())
+ lowering->DoLoadBuffer(node, output_info.representation(), changer_);
break;
}
case IrOpcode::kStoreBuffer: {
BufferAccess access = BufferAccessOf(node->op());
- ProcessInput(node, 0, kMachPtr); // buffer
- ProcessInput(node, 1, kMachInt32); // offset
- ProcessInput(node, 2, kMachInt32); // length
- ProcessInput(node, 3, access.machine_type()); // value
+ ProcessInput(node, 0, UseInfo::PointerInt()); // buffer
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // offset
+ ProcessInput(node, 2, UseInfo::TruncatingWord32()); // length
+ ProcessInput(node, 3,
+ TruncatingUseInfoFromRepresentation(
+ access.machine_type().representation())); // value
ProcessRemainingInputs(node, 4);
- SetOutput(node, 0);
+ SetOutput(node, NodeOutputInfo::None());
if (lower()) lowering->DoStoreBuffer(node);
break;
}
case IrOpcode::kLoadElement: {
ElementAccess access = ElementAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access)); // base
- ProcessInput(node, 1, kMachInt32); // index
+ ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
ProcessRemainingInputs(node, 2);
- SetOutput(node, access.machine_type);
- if (lower()) lowering->DoLoadElement(node);
+ SetOutputFromMachineType(node, access.machine_type);
break;
}
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access)); // base
- ProcessInput(node, 1, kMachInt32); // index
- ProcessInput(node, 2, access.machine_type); // value
+ ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 2,
+ TruncatingUseInfoFromRepresentation(
+ access.machine_type.representation())); // value
ProcessRemainingInputs(node, 3);
- SetOutput(node, 0);
- if (lower()) lowering->DoStoreElement(node);
+ SetOutput(node, NodeOutputInfo::None());
+ break;
+ }
+ case IrOpcode::kObjectIsNumber: {
+ ProcessInput(node, 0, UseInfo::AnyTagged());
+ SetOutput(node, NodeOutputInfo::Bool());
+ if (lower()) lowering->DoObjectIsNumber(node);
break;
}
case IrOpcode::kObjectIsSmi: {
- ProcessInput(node, 0, kMachAnyTagged);
- SetOutput(node, kRepBit | kTypeBool);
- if (lower()) {
- Node* is_tagged = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->WordAnd(), node->InputAt(0),
- jsgraph_->Int32Constant(static_cast<int>(kSmiTagMask)));
- Node* is_smi = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->WordEqual(), is_tagged,
- jsgraph_->Int32Constant(kSmiTag));
- DeferReplacement(node, is_smi);
- }
- break;
- }
- case IrOpcode::kObjectIsNonNegativeSmi: {
- ProcessInput(node, 0, kMachAnyTagged);
- SetOutput(node, kRepBit | kTypeBool);
- if (lower()) {
- Node* is_tagged = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->WordAnd(), node->InputAt(0),
- jsgraph_->Int32Constant(static_cast<int>(kSmiTagMask)));
- Node* is_smi = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->WordEqual(), is_tagged,
- jsgraph_->Int32Constant(kSmiTag));
- Node* is_non_neg = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->IntLessThanOrEqual(),
- jsgraph_->Int32Constant(0), node->InputAt(0));
- Node* is_non_neg_smi = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->Word32And(), is_smi, is_non_neg);
- DeferReplacement(node, is_non_neg_smi);
- }
+ ProcessInput(node, 0, UseInfo::AnyTagged());
+ SetOutput(node, NodeOutputInfo::Bool());
+ if (lower()) lowering->DoObjectIsSmi(node);
break;
}
@@ -881,29 +1251,31 @@
// Machine-level operators.
//------------------------------------------------------------------
case IrOpcode::kLoad: {
- // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
- MachineTypeUnion tBase = kRepTagged | kMachPtr;
- LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
- ProcessInput(node, 0, tBase); // pointer or object
- ProcessInput(node, 1, kMachInt32); // index
+        // TODO(jarin) Eventually, we should get rid of all machine loads and
+        // stores from the high-level phases; then this becomes UNREACHABLE.
+ LoadRepresentation rep = LoadRepresentationOf(node->op());
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // tagged pointer
+ ProcessInput(node, 1, UseInfo::PointerInt()); // index
ProcessRemainingInputs(node, 2);
- SetOutput(node, rep);
+ SetOutputFromMachineType(node, rep);
break;
}
case IrOpcode::kStore: {
- // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
- MachineTypeUnion tBase = kRepTagged | kMachPtr;
- StoreRepresentation rep = OpParameter<StoreRepresentation>(node);
- ProcessInput(node, 0, tBase); // pointer or object
- ProcessInput(node, 1, kMachInt32); // index
- ProcessInput(node, 2, rep.machine_type());
+      // TODO(jarin) Eventually, we should get rid of all machine stores from
+      // the high-level phases; then this becomes UNREACHABLE.
+ StoreRepresentation rep = StoreRepresentationOf(node->op());
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // tagged pointer
+ ProcessInput(node, 1, UseInfo::PointerInt()); // index
+ ProcessInput(node, 2,
+ TruncatingUseInfoFromRepresentation(rep.representation()));
ProcessRemainingInputs(node, 3);
- SetOutput(node, 0);
+ SetOutput(node, NodeOutputInfo::None());
break;
}
case IrOpcode::kWord32Shr:
      // We output unsigned int32 for shift right because JavaScript's
      // unsigned shift (>>>) produces an unsigned result.
- return VisitBinop(node, kMachUint32, kMachUint32);
+ return VisitBinop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Uint32());
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Xor:
@@ -912,9 +1284,15 @@
// We use signed int32 as the output type for these word32 operations,
// though the machine bits are the same for either signed or unsigned,
// because JavaScript considers the result from these operations signed.
- return VisitBinop(node, kRepWord32, kRepWord32 | kTypeInt32);
+ return VisitBinop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Int32());
case IrOpcode::kWord32Equal:
- return VisitBinop(node, kRepWord32, kRepBit);
+ return VisitBinop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Bool());
+
+ case IrOpcode::kWord32Clz:
+ return VisitUnop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Uint32());
case IrOpcode::kInt32Add:
case IrOpcode::kInt32Sub:
@@ -958,222 +1336,187 @@
case IrOpcode::kWord64Shl:
case IrOpcode::kWord64Shr:
case IrOpcode::kWord64Sar:
- return VisitBinop(node, kRepWord64, kRepWord64);
+ return VisitBinop(node, UseInfo::TruncatingWord64(),
+ NodeOutputInfo::Int64());
case IrOpcode::kWord64Equal:
- return VisitBinop(node, kRepWord64, kRepBit);
+ return VisitBinop(node, UseInfo::TruncatingWord64(),
+ NodeOutputInfo::Bool());
case IrOpcode::kChangeInt32ToInt64:
- return VisitUnop(node, kTypeInt32 | kRepWord32,
- kTypeInt32 | kRepWord64);
+ return VisitUnop(
+ node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo(MachineRepresentation::kWord64, Type::Signed32()));
case IrOpcode::kChangeUint32ToUint64:
- return VisitUnop(node, kTypeUint32 | kRepWord32,
- kTypeUint32 | kRepWord64);
+ return VisitUnop(
+ node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo(MachineRepresentation::kWord64, Type::Unsigned32()));
case IrOpcode::kTruncateFloat64ToFloat32:
- return VisitUnop(node, kTypeNumber | kRepFloat64,
- kTypeNumber | kRepFloat32);
+ return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float32());
+ case IrOpcode::kTruncateFloat64ToInt32:
+ return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Int32());
case IrOpcode::kTruncateInt64ToInt32:
// TODO(titzer): Is kTypeInt32 correct here?
- return VisitUnop(node, kTypeInt32 | kRepWord64,
- kTypeInt32 | kRepWord32);
+ return VisitUnop(node, UseInfo::Word64TruncatingToWord32(),
+ NodeOutputInfo::Int32());
case IrOpcode::kChangeFloat32ToFloat64:
- return VisitUnop(node, kTypeNumber | kRepFloat32,
- kTypeNumber | kRepFloat64);
+ return VisitUnop(node, UseInfo::Float32(), NodeOutputInfo::Float64());
case IrOpcode::kChangeInt32ToFloat64:
- return VisitUnop(node, kTypeInt32 | kRepWord32,
- kTypeInt32 | kRepFloat64);
+ return VisitUnop(
+ node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo(MachineRepresentation::kFloat64, Type::Signed32()));
case IrOpcode::kChangeUint32ToFloat64:
- return VisitUnop(node, kTypeUint32 | kRepWord32,
- kTypeUint32 | kRepFloat64);
+ return VisitUnop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo(MachineRepresentation::kFloat64,
+ Type::Unsigned32()));
case IrOpcode::kChangeFloat64ToInt32:
- return VisitUnop(node, kTypeInt32 | kRepFloat64,
- kTypeInt32 | kRepWord32);
+ return VisitUnop(node, UseInfo::Float64TruncatingToWord32(),
+ NodeOutputInfo::Int32());
case IrOpcode::kChangeFloat64ToUint32:
- return VisitUnop(node, kTypeUint32 | kRepFloat64,
- kTypeUint32 | kRepWord32);
+ return VisitUnop(node, UseInfo::Float64TruncatingToWord32(),
+ NodeOutputInfo::Uint32());
case IrOpcode::kFloat64Add:
case IrOpcode::kFloat64Sub:
case IrOpcode::kFloat64Mul:
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
+ case IrOpcode::kFloat64Min:
return VisitFloat64Binop(node);
+ case IrOpcode::kFloat64Abs:
case IrOpcode::kFloat64Sqrt:
- case IrOpcode::kFloat64Floor:
- case IrOpcode::kFloat64Ceil:
+ case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
- return VisitUnop(node, kMachFloat64, kMachFloat64);
+ return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64Cmp(node);
+ case IrOpcode::kFloat64ExtractLowWord32:
+ case IrOpcode::kFloat64ExtractHighWord32:
+ return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Int32());
+ case IrOpcode::kFloat64InsertLowWord32:
+ case IrOpcode::kFloat64InsertHighWord32:
+ return VisitBinop(node, UseInfo::Float64(), UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Float64());
case IrOpcode::kLoadStackPointer:
- return VisitLeaf(node, kMachPtr);
+ case IrOpcode::kLoadFramePointer:
+ return VisitLeaf(node, NodeOutputInfo::Pointer());
case IrOpcode::kStateValues:
- for (int i = 0; i < node->InputCount(); i++) {
- ProcessInput(node, i, kTypeAny);
- }
- SetOutput(node, kMachAnyTagged);
+ VisitStateValues(node);
break;
default:
VisitInputs(node);
+ // Assume the output is tagged.
+ SetOutput(node, NodeOutputInfo::AnyTagged());
break;
}
}
void DeferReplacement(Node* node, Node* replacement) {
- if (FLAG_trace_representation) {
- TRACE(("defer replacement #%d:%s with #%d:%s\n", node->id(),
- node->op()->mnemonic(), replacement->id(),
- replacement->op()->mnemonic()));
- }
- if (replacement->id() < count_) {
- // Replace with a previously existing node eagerly.
+ TRACE("defer replacement #%d:%s with #%d:%s\n", node->id(),
+ node->op()->mnemonic(), replacement->id(),
+ replacement->op()->mnemonic());
+
+ if (replacement->id() < count_ &&
+ GetInfo(node)->output_type()->Is(GetInfo(replacement)->output_type())) {
+      // Eagerly replace with a previously existing node only if the node's
+      // type is a subtype of the replacement's type.
node->ReplaceUses(replacement);
} else {
// Otherwise, we are replacing a node with a representation change.
// Such a substitution must be done after all lowering is done, because
- // new nodes do not have {NodeInfo} entries, and that would confuse
- // the representation change insertion for uses of it.
+ // changing the type could confuse the representation change
+ // insertion for uses of the node.
replacements_.push_back(node);
replacements_.push_back(replacement);
}
- // TODO(titzer) node->RemoveAllInputs(); // Node is now dead.
+ node->NullAllInputs(); // Node is now dead.
}
- void PrintUseInfo(Node* node) {
- TRACE(("#%d:%-20s ", node->id(), node->op()->mnemonic()));
- PrintInfo(GetUseInfo(node));
- TRACE(("\n"));
- }
-
- void PrintInfo(MachineTypeUnion info) {
+ void PrintOutputInfo(NodeInfo* info) {
if (FLAG_trace_representation) {
OFStream os(stdout);
- os << static_cast<MachineType>(info);
+ os << info->representation() << " (";
+ info->output_type()->PrintTo(os, Type::SEMANTIC_DIM);
+ os << ")";
+ }
+ }
+
+ void PrintRepresentation(MachineRepresentation rep) {
+ if (FLAG_trace_representation) {
+ OFStream os(stdout);
+ os << rep;
+ }
+ }
+
+ void PrintTruncation(Truncation truncation) {
+ if (FLAG_trace_representation) {
+ OFStream os(stdout);
+ os << truncation.description();
+ }
+ }
+
+ void PrintUseInfo(UseInfo info) {
+ if (FLAG_trace_representation) {
+ OFStream os(stdout);
+ os << info.preferred() << ":" << info.truncation().description();
}
}
private:
JSGraph* jsgraph_;
- int count_; // number of nodes in the graph
- NodeInfo* info_; // node id -> usage information
+ size_t const count_; // number of nodes in the graph
+ ZoneVector<NodeInfo> info_; // node id -> usage information
+#ifdef DEBUG
+ ZoneVector<InputUseInfos> node_input_use_infos_; // Debug information about
+ // requirements on inputs.
+#endif // DEBUG
NodeVector nodes_; // collected nodes
NodeVector replacements_; // replacements to be done after lowering
Phase phase_; // current phase of algorithm
RepresentationChanger* changer_; // for inserting representation changes
ZoneQueue<Node*> queue_; // queue for traversing the graph
- Type* safe_bit_range_;
- Type* safe_int_additive_range_;
+ // TODO(danno): RepresentationSelector shouldn't know anything about the
+ // source positions table, but must for now since there currently is no other
+ // way to pass down source position information to nodes created during
+ // lowering. Once this phase becomes a vanilla reducer, it should get source
+ // position information via the SourcePositionWrapper like all other reducers.
+ SourcePositionTable* source_positions_;
+ TypeCache const& type_cache_;
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() >= 0);
DCHECK(node->id() < count_);
return &info_[node->id()];
}
-
- MachineTypeUnion GetUseInfo(Node* node) { return GetInfo(node)->use; }
};
-Node* SimplifiedLowering::IsTagged(Node* node) {
- // TODO(titzer): factor this out to a TaggingScheme abstraction.
- STATIC_ASSERT(kSmiTagMask == 1); // Only works if tag is the low bit.
- return graph()->NewNode(machine()->WordAnd(), node,
- jsgraph()->Int32Constant(kSmiTagMask));
-}
+SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
+ SourcePositionTable* source_positions)
+ : jsgraph_(jsgraph),
+ zone_(zone),
+ type_cache_(TypeCache::Get()),
+ source_positions_(source_positions) {}
void SimplifiedLowering::LowerAllNodes() {
- SimplifiedOperatorBuilder simplified(graph()->zone());
- RepresentationChanger changer(jsgraph(), &simplified,
- graph()->zone()->isolate());
- RepresentationSelector selector(jsgraph(), zone_, &changer);
+ RepresentationChanger changer(jsgraph(), jsgraph()->isolate());
+ RepresentationSelector selector(jsgraph(), zone_, &changer,
+ source_positions_);
selector.Run(this);
}
-Node* SimplifiedLowering::Untag(Node* node) {
- // TODO(titzer): factor this out to a TaggingScheme abstraction.
- Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
- return graph()->NewNode(machine()->WordSar(), node, shift_amount);
-}
-
-
-Node* SimplifiedLowering::SmiTag(Node* node) {
- // TODO(titzer): factor this out to a TaggingScheme abstraction.
- Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
- return graph()->NewNode(machine()->WordShl(), node, shift_amount);
-}
-
-
-Node* SimplifiedLowering::OffsetMinusTagConstant(int32_t offset) {
- return jsgraph()->Int32Constant(offset - kHeapObjectTag);
-}
-
-
-static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
- MachineType representation,
- Type* type) {
- // TODO(turbofan): skip write barriers for Smis, etc.
- if (base_is_tagged == kTaggedBase &&
- RepresentationOf(representation) == kRepTagged) {
- // Write barriers are only for writes into heap objects (i.e. tagged base).
- return kFullWriteBarrier;
- }
- return kNoWriteBarrier;
-}
-
-
-void SimplifiedLowering::DoLoadField(Node* node) {
- const FieldAccess& access = FieldAccessOf(node->op());
- node->set_op(machine()->Load(access.machine_type));
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
-}
-
-
-void SimplifiedLowering::DoStoreField(Node* node) {
- const FieldAccess& access = FieldAccessOf(node->op());
- WriteBarrierKind kind = ComputeWriteBarrierKind(
- access.base_is_tagged, access.machine_type, access.type);
- node->set_op(
- machine()->Store(StoreRepresentation(access.machine_type, kind)));
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
-}
-
-
-Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
- Node* const key) {
- Node* index = key;
- const int element_size_shift = ElementSizeLog2Of(access.machine_type);
- if (element_size_shift) {
- index = graph()->NewNode(machine()->Word32Shl(), index,
- jsgraph()->Int32Constant(element_size_shift));
- }
- const int fixed_offset = access.header_size - access.tag();
- if (fixed_offset) {
- index = graph()->NewNode(machine()->Int32Add(), index,
- jsgraph()->Int32Constant(fixed_offset));
- }
- if (machine()->Is64()) {
- // TODO(turbofan): This is probably only correct for typed arrays, and only
- // if the typed arrays are at most 2GiB in size, which happens to match
- // exactly our current situation.
- index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
- }
- return index;
-}
-
-
-void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
+void SimplifiedLowering::DoLoadBuffer(Node* node,
+ MachineRepresentation output_rep,
RepresentationChanger* changer) {
DCHECK_EQ(IrOpcode::kLoadBuffer, node->opcode());
- DCHECK_NE(kMachNone, RepresentationOf(output_type));
- MachineType const type = BufferAccessOf(node->op()).machine_type();
- if (output_type != type) {
+ DCHECK_NE(MachineRepresentation::kNone, output_rep);
+ MachineType const access_type = BufferAccessOf(node->op()).machine_type();
+ if (output_rep != access_type.representation()) {
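+    // The requested representation differs from the access type, so expand
+    // the load into a bounds-checked diamond that yields undefined/NaN
+    // (depending on {output_rep}) when the index is out of bounds.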
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
@@ -1189,19 +1532,21 @@
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue =
- graph()->NewNode(machine()->Load(type), buffer, index, effect, if_true);
- Node* vtrue = changer->GetRepresentationFor(etrue, type, output_type);
+ Node* etrue = graph()->NewNode(machine()->Load(access_type), buffer, index,
+ effect, if_true);
+ Node* vtrue = changer->GetRepresentationFor(
+ etrue, access_type.representation(), NodeProperties::GetType(node),
+ output_rep, Truncation::None());
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* efalse = effect;
Node* vfalse;
- if (output_type & kRepTagged) {
+ if (output_rep == MachineRepresentation::kTagged) {
vfalse = jsgraph()->UndefinedConstant();
- } else if (output_type & kRepFloat64) {
+ } else if (output_rep == MachineRepresentation::kFloat64) {
vfalse =
jsgraph()->Float64Constant(std::numeric_limits<double>::quiet_NaN());
- } else if (output_type & kRepFloat32) {
+ } else if (output_rep == MachineRepresentation::kFloat32) {
vfalse =
jsgraph()->Float32Constant(std::numeric_limits<float>::quiet_NaN());
} else {
@@ -1212,82 +1557,83 @@
Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
// Replace effect uses of {node} with the {ephi}.
- NodeProperties::ReplaceWithValue(node, node, ephi);
+ NodeProperties::ReplaceUses(node, node, ephi);
// Turn the {node} into a Phi.
- node->set_op(common()->Phi(output_type, 2));
node->ReplaceInput(0, vtrue);
node->ReplaceInput(1, vfalse);
node->ReplaceInput(2, merge);
node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node, common()->Phi(output_rep, 2));
} else {
- node->set_op(machine()->CheckedLoad(type));
+ NodeProperties::ChangeOp(node, machine()->CheckedLoad(access_type));
}
}
void SimplifiedLowering::DoStoreBuffer(Node* node) {
DCHECK_EQ(IrOpcode::kStoreBuffer, node->opcode());
- MachineType const type = BufferAccessOf(node->op()).machine_type();
- node->set_op(machine()->CheckedStore(type));
+ MachineRepresentation const rep =
+ BufferAccessOf(node->op()).machine_type().representation();
+ NodeProperties::ChangeOp(node, machine()->CheckedStore(rep));
}
-void SimplifiedLowering::DoLoadElement(Node* node) {
- const ElementAccess& access = ElementAccessOf(node->op());
- node->set_op(machine()->Load(access.machine_type));
- node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+void SimplifiedLowering::DoObjectIsNumber(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ // TODO(bmeurer): Optimize somewhat based on input type.
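+  // ObjectIsNumber(x) => IsSmi(x) ? 1 : (map(x) == #heap_number_map)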
+ Node* check =
+ graph()->NewNode(machine()->WordEqual(),
+ graph()->NewNode(machine()->WordAnd(), input,
+ jsgraph()->IntPtrConstant(kSmiTagMask)),
+ jsgraph()->IntPtrConstant(kSmiTag));
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = jsgraph()->Int32Constant(1);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(
+ machine()->WordEqual(),
+ graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), input,
+ jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
+ graph()->start(), if_false),
+ jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
+ Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ node->ReplaceInput(0, vtrue);
+ node->AppendInput(graph()->zone(), vfalse);
+ node->AppendInput(graph()->zone(), control);
+ NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
}
-void SimplifiedLowering::DoStoreElement(Node* node) {
- const ElementAccess& access = ElementAccessOf(node->op());
- node->set_op(machine()->Store(StoreRepresentation(
- access.machine_type,
- ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type,
- access.type))));
- node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+void SimplifiedLowering::DoObjectIsSmi(Node* node) {
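+  // ObjectIsSmi(x) => WordEqual(WordAnd(x, kSmiTagMask), kSmiTag)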
+ node->ReplaceInput(0,
+ graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
+ jsgraph()->IntPtrConstant(kSmiTagMask)));
+ node->AppendInput(graph()->zone(), jsgraph()->IntPtrConstant(kSmiTag));
+ NodeProperties::ChangeOp(node, machine()->WordEqual());
}
-void SimplifiedLowering::DoStringAdd(Node* node) {
+Node* SimplifiedLowering::StringComparison(Node* node) {
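+  // Builds a call to the StringCompare stub; the callers below compare the
+  // returned Smi against EQUAL.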
Operator::Properties properties = node->op()->properties();
- Callable callable = CodeFactory::StringAdd(
- zone()->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ Callable callable = CodeFactory::StringCompare(isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- callable.descriptor(), 0, flags, properties, zone());
- node->set_op(common()->Call(desc));
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->AppendInput(graph()->zone(), jsgraph()->UndefinedConstant());
- node->AppendInput(graph()->zone(), graph()->start());
- node->AppendInput(graph()->zone(), graph()->start());
-}
-
-
-Node* SimplifiedLowering::StringComparison(Node* node, bool requires_ordering) {
- CEntryStub stub(zone()->isolate(), 1);
- Runtime::FunctionId f =
- requires_ordering ? Runtime::kStringCompare : Runtime::kStringEquals;
- ExternalReference ref(f, zone()->isolate());
- Operator::Properties props = node->op()->properties();
- // TODO(mstarzinger): We should call StringCompareStub here instead, once an
- // interface descriptor is available for it.
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(f, 2, props, zone());
- return graph()->NewNode(common()->Call(desc),
- jsgraph()->HeapConstant(stub.GetCode()),
- NodeProperties::GetValueInput(node, 0),
- NodeProperties::GetValueInput(node, 1),
- jsgraph()->ExternalConstant(ref),
- jsgraph()->Int32Constant(2),
- jsgraph()->UndefinedConstant());
+ isolate(), zone(), callable.descriptor(), 0, flags, properties);
+ return graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+ NodeProperties::GetValueInput(node, 0),
+ NodeProperties::GetValueInput(node, 1), jsgraph()->NoContextConstant(),
+ NodeProperties::GetEffectInput(node),
+ NodeProperties::GetControlInput(node));
}
Node* SimplifiedLowering::Int32Div(Node* const node) {
Int32BinopMatcher m(node);
Node* const zero = jsgraph()->Int32Constant(0);
+ Node* const minus_one = jsgraph()->Int32Constant(-1);
Node* const lhs = m.left().node();
Node* const rhs = m.right().node();
@@ -1299,20 +1645,62 @@
return graph()->NewNode(machine()->Int32Div(), lhs, rhs, graph()->start());
}
- Diamond if_zero(graph(), common(),
- graph()->NewNode(machine()->Word32Equal(), rhs, zero),
- BranchHint::kFalse);
+ // General case for signed integer division.
+ //
+ // if 0 < rhs then
+ // lhs / rhs
+ // else
+ // if rhs < -1 then
+ // lhs / rhs
+ // else if rhs == 0 then
+ // 0
+ // else
+ // 0 - lhs
+ //
+ // Note: We do not use the Diamond helper class here, because it really hurts
+ // readability with nested diamonds.
+ const Operator* const merge_op = common()->Merge(2);
+ const Operator* const phi_op =
+ common()->Phi(MachineRepresentation::kWord32, 2);
- Diamond if_minus_one(graph(), common(),
- graph()->NewNode(machine()->Word32Equal(), rhs,
- jsgraph()->Int32Constant(-1)),
- BranchHint::kFalse);
- if_minus_one.Nest(if_zero, false);
- Node* sub = graph()->NewNode(machine()->Int32Sub(), zero, lhs);
- Node* div =
- graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_minus_one.if_false);
+ Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+ graph()->start());
- return if_zero.Phi(kMachInt32, zero, if_minus_one.Phi(kMachInt32, sub, div));
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* true0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true0);
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* false0;
+ {
+ Node* check1 = graph()->NewNode(machine()->Int32LessThan(), rhs, minus_one);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* true1 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true1);
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* false1;
+ {
+ Node* check2 = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* true2 = zero;
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* false2 = graph()->NewNode(machine()->Int32Sub(), zero, lhs);
+
+ if_false1 = graph()->NewNode(merge_op, if_true2, if_false2);
+ false1 = graph()->NewNode(phi_op, true2, false2, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
+ false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
+ }
+
+ Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
+ return graph()->NewNode(phi_op, true0, false0, merge0);
}
@@ -1350,7 +1738,8 @@
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
const Operator* const merge_op = common()->Merge(2);
- const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+ const Operator* const phi_op =
+ common()->Phi(MachineRepresentation::kWord32, 2);
Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
@@ -1429,7 +1818,7 @@
Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
Diamond d(graph(), common(), check, BranchHint::kFalse);
Node* div = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, d.if_false);
- return d.Phi(kMachUint32, zero, div);
+ return d.Phi(MachineRepresentation::kWord32, zero, div);
}
@@ -1461,7 +1850,8 @@
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
const Operator* const merge_op = common()->Merge(2);
- const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+ const Operator* const phi_op =
+ common()->Phi(MachineRepresentation::kWord32, 2);
Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), rhs,
graph()->start());
@@ -1492,24 +1882,61 @@
}
+void SimplifiedLowering::DoShift(Node* node, Operator const* op,
+ Type* rhs_type) {
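+  // JavaScript shift counts are taken modulo 32, so mask the shift amount
+  // with 0x1f unless its type already guarantees the range [0, 31].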
+ Node* const rhs = NodeProperties::GetValueInput(node, 1);
+ if (!rhs_type->Is(type_cache_.kZeroToThirtyOne)) {
+ node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
+ jsgraph()->Int32Constant(0x1f)));
+ }
+ NodeProperties::ChangeOp(node, op);
+}
+
+
+namespace {
+
+void ReplaceEffectUses(Node* node, Node* replacement) {
+  // {replacement} must have an effect output, since all effect edges of
+  // {node} are redirected to it; value edges are left in place.
+ DCHECK(replacement->op()->EffectOutputCount() > 0);
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(replacement);
+ } else {
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ }
+ }
+}
+
+} // namespace
+
+
void SimplifiedLowering::DoStringEqual(Node* node) {
- node->set_op(machine()->WordEqual());
- node->ReplaceInput(0, StringComparison(node, false));
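+  // StringEqual(x, y) => WordEqual(StringCompare(x, y), EQUAL)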
+ Node* comparison = StringComparison(node);
+ ReplaceEffectUses(node, comparison);
+ node->ReplaceInput(0, comparison);
node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->WordEqual());
}
void SimplifiedLowering::DoStringLessThan(Node* node) {
- node->set_op(machine()->IntLessThan());
- node->ReplaceInput(0, StringComparison(node, true));
+ Node* comparison = StringComparison(node);
+ ReplaceEffectUses(node, comparison);
+ node->ReplaceInput(0, comparison);
node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->IntLessThan());
}
void SimplifiedLowering::DoStringLessThanOrEqual(Node* node) {
- node->set_op(machine()->IntLessThanOrEqual());
- node->ReplaceInput(0, StringComparison(node, true));
+ Node* comparison = StringComparison(node);
+ ReplaceEffectUses(node, comparison);
+ node->ReplaceInput(0, comparison);
node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->IntLessThanOrEqual());
}
} // namespace compiler
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index b21cf21..f9410f8 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -12,31 +12,33 @@
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class TypeCache;
+
+
namespace compiler {
// Forward declarations.
class RepresentationChanger;
+class SourcePositionTable;
-
-class SimplifiedLowering FINAL {
+class SimplifiedLowering final {
public:
- SimplifiedLowering(JSGraph* jsgraph, Zone* zone)
- : jsgraph_(jsgraph), zone_(zone) {}
+ SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
+ SourcePositionTable* source_positions);
~SimplifiedLowering() {}
void LowerAllNodes();
- // TODO(titzer): These are exposed for direct testing. Use a friend class.
- void DoLoadField(Node* node);
- void DoStoreField(Node* node);
- // TODO(turbofan): The output_type can be removed once the result of the
+ // TODO(turbofan): The representation can be removed once the result of the
// representation analysis is stored in the node bounds.
- void DoLoadBuffer(Node* node, MachineType output_type,
+ void DoLoadBuffer(Node* node, MachineRepresentation rep,
RepresentationChanger* changer);
void DoStoreBuffer(Node* node);
- void DoLoadElement(Node* node);
- void DoStoreElement(Node* node);
- void DoStringAdd(Node* node);
+ void DoObjectIsNumber(Node* node);
+ void DoObjectIsSmi(Node* node);
+ void DoShift(Node* node, Operator const* op, Type* rhs_type);
void DoStringEqual(Node* node);
void DoStringLessThan(Node* node);
void DoStringLessThanOrEqual(Node* node);
@@ -44,13 +46,16 @@
private:
JSGraph* const jsgraph_;
Zone* const zone_;
+ TypeCache const& type_cache_;
- Node* SmiTag(Node* node);
- Node* IsTagged(Node* node);
- Node* Untag(Node* node);
- Node* OffsetMinusTagConstant(int32_t offset);
- Node* ComputeIndex(const ElementAccess& access, Node* const key);
- Node* StringComparison(Node* node, bool requires_ordering);
+ // TODO(danno): SimplifiedLowering shouldn't know anything about the source
+ // positions table, but must for now since there currently is no other way to
+ // pass down source position information to nodes created during
+ // lowering. Once this phase becomes a vanilla reducer, it should get source
+ // position information via the SourcePositionWrapper like all other reducers.
+ SourcePositionTable* source_positions_;
+
+ Node* StringComparison(Node* node);
Node* Int32Div(Node* const node);
Node* Int32Mod(Node* const node);
Node* Uint32Div(Node* const node);
@@ -58,6 +63,7 @@
friend class RepresentationSelector;
+ Isolate* isolate() { return jsgraph_->isolate(); }
Zone* zone() { return jsgraph_->zone(); }
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph()->graph(); }
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index 9d45e5b..120d792 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -4,18 +4,18 @@
#include "src/compiler/simplified-operator-reducer.h"
-#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+#include "src/conversions-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
SimplifiedOperatorReducer::SimplifiedOperatorReducer(JSGraph* jsgraph)
- : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+ : jsgraph_(jsgraph) {}
SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
@@ -23,35 +23,25 @@
Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
- case IrOpcode::kAnyToBoolean:
- return ReduceAnyToBoolean(node);
case IrOpcode::kBooleanNot: {
- HeapObjectMatcher<HeapObject> m(node->InputAt(0));
- if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->false_value()))) {
- return Replace(jsgraph()->TrueConstant());
+ HeapObjectMatcher m(node->InputAt(0));
+ if (m.HasValue()) {
+ return Replace(jsgraph()->BooleanConstant(!m.Value()->BooleanValue()));
}
- if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->true_value()))) {
- return Replace(jsgraph()->FalseConstant());
- }
- if (m.IsBooleanNot()) return Replace(m.node()->InputAt(0));
+ if (m.IsBooleanNot()) return Replace(m.InputAt(0));
break;
}
case IrOpcode::kChangeBitToBool: {
Int32Matcher m(node->InputAt(0));
if (m.Is(0)) return Replace(jsgraph()->FalseConstant());
if (m.Is(1)) return Replace(jsgraph()->TrueConstant());
- if (m.IsChangeBoolToBit()) return Replace(m.node()->InputAt(0));
+ if (m.IsChangeBoolToBit()) return Replace(m.InputAt(0));
break;
}
case IrOpcode::kChangeBoolToBit: {
- HeapObjectMatcher<HeapObject> m(node->InputAt(0));
- if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->false_value()))) {
- return ReplaceInt32(0);
- }
- if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->true_value()))) {
- return ReplaceInt32(1);
- }
- if (m.IsChangeBitToBool()) return Replace(m.node()->InputAt(0));
+ HeapObjectMatcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(m.Value()->BooleanValue());
+ if (m.IsChangeBitToBool()) return Replace(m.InputAt(0));
break;
}
case IrOpcode::kChangeFloat64ToTagged: {
@@ -69,12 +59,10 @@
if (m.HasValue()) return ReplaceFloat64(m.Value());
if (m.IsChangeFloat64ToTagged()) return Replace(m.node()->InputAt(0));
if (m.IsChangeInt32ToTagged()) {
- return Change(node, machine()->ChangeInt32ToFloat64(),
- m.node()->InputAt(0));
+ return Change(node, machine()->ChangeInt32ToFloat64(), m.InputAt(0));
}
if (m.IsChangeUint32ToTagged()) {
- return Change(node, machine()->ChangeUint32ToFloat64(),
- m.node()->InputAt(0));
+ return Change(node, machine()->ChangeUint32ToFloat64(), m.InputAt(0));
}
break;
}
@@ -82,20 +70,18 @@
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
if (m.IsChangeFloat64ToTagged()) {
- return Change(node, machine()->ChangeFloat64ToInt32(),
- m.node()->InputAt(0));
+ return Change(node, machine()->ChangeFloat64ToInt32(), m.InputAt(0));
}
- if (m.IsChangeInt32ToTagged()) return Replace(m.node()->InputAt(0));
+ if (m.IsChangeInt32ToTagged()) return Replace(m.InputAt(0));
break;
}
case IrOpcode::kChangeTaggedToUint32: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceUint32(DoubleToUint32(m.Value()));
if (m.IsChangeFloat64ToTagged()) {
- return Change(node, machine()->ChangeFloat64ToUint32(),
- m.node()->InputAt(0));
+ return Change(node, machine()->ChangeFloat64ToUint32(), m.InputAt(0));
}
- if (m.IsChangeUint32ToTagged()) return Replace(m.node()->InputAt(0));
+ if (m.IsChangeUint32ToTagged()) return Replace(m.InputAt(0));
break;
}
case IrOpcode::kChangeUint32ToTagged: {
@@ -103,6 +89,8 @@
if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
break;
}
+ case IrOpcode::kReferenceEqual:
+ return ReduceReferenceEqual(node);
default:
break;
}
@@ -110,27 +98,18 @@
}
-Reduction SimplifiedOperatorReducer::ReduceAnyToBoolean(Node* node) {
- Node* const input = NodeProperties::GetValueInput(node, 0);
- Type* const input_type = NodeProperties::GetBounds(input).upper;
- if (input_type->Is(Type::Boolean())) {
- // AnyToBoolean(x:boolean) => x
- return Replace(input);
- }
- if (input_type->Is(Type::OrderedNumber())) {
- // AnyToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x, #0))
- Node* compare = graph()->NewNode(simplified()->NumberEqual(), input,
- jsgraph()->ZeroConstant());
- return Change(node, simplified()->BooleanNot(), compare);
- }
- if (input_type->Is(Type::String())) {
- // AnyToBoolean(x:string) => BooleanNot(NumberEqual(x.length, #0))
- FieldAccess const access = AccessBuilder::ForStringLength();
- Node* length = graph()->NewNode(simplified()->LoadField(access), input,
- graph()->start(), graph()->start());
- Node* compare = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- return Change(node, simplified()->BooleanNot(), compare);
+Reduction SimplifiedOperatorReducer::ReduceReferenceEqual(Node* node) {
+ DCHECK_EQ(IrOpcode::kReferenceEqual, node->opcode());
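+  // Two heap-object constants are reference-equal iff they are the very
+  // same object.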
+ Node* const left = NodeProperties::GetValueInput(node, 0);
+ Node* const right = NodeProperties::GetValueInput(node, 1);
+ HeapObjectMatcher match_left(left);
+ HeapObjectMatcher match_right(right);
+ if (match_left.HasValue() && match_right.HasValue()) {
+ if (match_left.Value().is_identical_to(match_right.Value())) {
+ return Replace(jsgraph()->TrueConstant());
+ } else {
+ return Replace(jsgraph()->FalseConstant());
+ }
}
return NoChange();
}
@@ -140,8 +119,8 @@
Node* a) {
DCHECK_EQ(node->InputCount(), OperatorProperties::GetTotalInputCount(op));
DCHECK_LE(1, node->InputCount());
- node->set_op(op);
node->ReplaceInput(0, a);
+ NodeProperties::ChangeOp(node, op);
return Changed(node);
}
@@ -169,16 +148,6 @@
Graph* SimplifiedOperatorReducer::graph() const { return jsgraph()->graph(); }
-Factory* SimplifiedOperatorReducer::factory() const {
- return jsgraph()->isolate()->factory();
-}
-
-
-CommonOperatorBuilder* SimplifiedOperatorReducer::common() const {
- return jsgraph()->common();
-}
-
-
MachineOperatorBuilder* SimplifiedOperatorReducer::machine() const {
return jsgraph()->machine();
}
diff --git a/src/compiler/simplified-operator-reducer.h b/src/compiler/simplified-operator-reducer.h
index 1e565b8..979a3d0 100644
--- a/src/compiler/simplified-operator-reducer.h
+++ b/src/compiler/simplified-operator-reducer.h
@@ -6,30 +6,26 @@
#define V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
-
-// Forward declarations.
-class Heap;
-
namespace compiler {
// Forward declarations.
-class CommonOperatorBuilder;
class JSGraph;
class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
-class SimplifiedOperatorReducer FINAL : public Reducer {
+
+class SimplifiedOperatorReducer final : public Reducer {
public:
explicit SimplifiedOperatorReducer(JSGraph* jsgraph);
- ~SimplifiedOperatorReducer() FINAL;
+ ~SimplifiedOperatorReducer() final;
- Reduction Reduce(Node* node) FINAL;
+ Reduction Reduce(Node* node) final;
private:
- Reduction ReduceAnyToBoolean(Node* node);
+ Reduction ReduceReferenceEqual(Node* node);
Reduction Change(Node* node, const Operator* op, Node* a);
Reduction ReplaceFloat64(double value);
@@ -41,14 +37,11 @@
Reduction ReplaceNumber(int32_t value);
Graph* graph() const;
- Factory* factory() const;
JSGraph* jsgraph() const { return jsgraph_; }
- CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ SimplifiedOperatorBuilder* simplified() const;
- JSGraph* jsgraph_;
- SimplifiedOperatorBuilder simplified_;
+ JSGraph* const jsgraph_;
DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
};
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index 9d88d12..1eaa287 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -29,24 +29,24 @@
switch (external_array_type_) {
case kExternalUint8Array:
case kExternalUint8ClampedArray:
- return kMachUint8;
+ return MachineType::Uint8();
case kExternalInt8Array:
- return kMachInt8;
+ return MachineType::Int8();
case kExternalUint16Array:
- return kMachUint16;
+ return MachineType::Uint16();
case kExternalInt16Array:
- return kMachInt16;
+ return MachineType::Int16();
case kExternalUint32Array:
- return kMachUint32;
+ return MachineType::Uint32();
case kExternalInt32Array:
- return kMachInt32;
+ return MachineType::Int32();
case kExternalFloat32Array:
- return kMachFloat32;
+ return MachineType::Float32();
case kExternalFloat64Array:
- return kMachFloat64;
+ return MachineType::Float64();
}
UNREACHABLE();
- return kMachNone;
+ return MachineType::None();
}
@@ -157,39 +157,46 @@
}
-#define PURE_OP_LIST(V) \
- V(AnyToBoolean, Operator::kNoProperties, 1) \
- V(BooleanNot, Operator::kNoProperties, 1) \
- V(BooleanToNumber, Operator::kNoProperties, 1) \
- V(NumberEqual, Operator::kCommutative, 2) \
- V(NumberLessThan, Operator::kNoProperties, 2) \
- V(NumberLessThanOrEqual, Operator::kNoProperties, 2) \
- V(NumberAdd, Operator::kCommutative, 2) \
- V(NumberSubtract, Operator::kNoProperties, 2) \
- V(NumberMultiply, Operator::kCommutative, 2) \
- V(NumberDivide, Operator::kNoProperties, 2) \
- V(NumberModulus, Operator::kNoProperties, 2) \
- V(NumberToInt32, Operator::kNoProperties, 1) \
- V(NumberToUint32, Operator::kNoProperties, 1) \
- V(StringEqual, Operator::kCommutative, 2) \
- V(StringLessThan, Operator::kNoProperties, 2) \
- V(StringLessThanOrEqual, Operator::kNoProperties, 2) \
- V(StringAdd, Operator::kNoProperties, 2) \
- V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
- V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
- V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
- V(ChangeInt32ToTagged, Operator::kNoProperties, 1) \
- V(ChangeUint32ToTagged, Operator::kNoProperties, 1) \
- V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
- V(ChangeBoolToBit, Operator::kNoProperties, 1) \
- V(ChangeBitToBool, Operator::kNoProperties, 1) \
- V(ObjectIsSmi, Operator::kNoProperties, 1) \
- V(ObjectIsNonNegativeSmi, Operator::kNoProperties, 1)
+#define PURE_OP_LIST(V) \
+ V(BooleanNot, Operator::kNoProperties, 1) \
+ V(BooleanToNumber, Operator::kNoProperties, 1) \
+ V(NumberEqual, Operator::kCommutative, 2) \
+ V(NumberLessThan, Operator::kNoProperties, 2) \
+ V(NumberLessThanOrEqual, Operator::kNoProperties, 2) \
+ V(NumberAdd, Operator::kCommutative, 2) \
+ V(NumberSubtract, Operator::kNoProperties, 2) \
+ V(NumberMultiply, Operator::kCommutative, 2) \
+ V(NumberDivide, Operator::kNoProperties, 2) \
+ V(NumberModulus, Operator::kNoProperties, 2) \
+ V(NumberBitwiseOr, Operator::kCommutative, 2) \
+ V(NumberBitwiseXor, Operator::kCommutative, 2) \
+ V(NumberBitwiseAnd, Operator::kCommutative, 2) \
+ V(NumberShiftLeft, Operator::kNoProperties, 2) \
+ V(NumberShiftRight, Operator::kNoProperties, 2) \
+ V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
+ V(NumberToInt32, Operator::kNoProperties, 1) \
+ V(NumberToUint32, Operator::kNoProperties, 1) \
+ V(NumberIsHoleNaN, Operator::kNoProperties, 1) \
+ V(PlainPrimitiveToNumber, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
+ V(ChangeInt32ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeUint32ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeBoolToBit, Operator::kNoProperties, 1) \
+ V(ChangeBitToBool, Operator::kNoProperties, 1) \
+ V(ObjectIsNumber, Operator::kNoProperties, 1) \
+ V(ObjectIsSmi, Operator::kNoProperties, 1)
+#define NO_THROW_OP_LIST(V) \
+ V(StringEqual, Operator::kCommutative, 2) \
+ V(StringLessThan, Operator::kNoThrow, 2) \
+ V(StringLessThanOrEqual, Operator::kNoThrow, 2)
-struct SimplifiedOperatorGlobalCache FINAL {
+struct SimplifiedOperatorGlobalCache final {
#define PURE(Name, properties, input_count) \
- struct Name##Operator FINAL : public Operator { \
+ struct Name##Operator final : public Operator { \
Name##Operator() \
: Operator(IrOpcode::k##Name, Operator::kPure | properties, #Name, \
input_count, 0, 0, 1, 0, 0) {} \
@@ -198,15 +205,25 @@
PURE_OP_LIST(PURE)
#undef PURE
+#define NO_THROW(Name, properties, input_count) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, Operator::kNoThrow | properties, #Name, \
+ input_count, 1, 1, 1, 1, 0) {} \
+ }; \
+ Name##Operator k##Name;
+ NO_THROW_OP_LIST(NO_THROW)
+#undef NO_THROW
+
#define BUFFER_ACCESS(Type, type, TYPE, ctype, size) \
- struct LoadBuffer##Type##Operator FINAL : public Operator1<BufferAccess> { \
+ struct LoadBuffer##Type##Operator final : public Operator1<BufferAccess> { \
LoadBuffer##Type##Operator() \
: Operator1<BufferAccess>(IrOpcode::kLoadBuffer, \
Operator::kNoThrow | Operator::kNoWrite, \
"LoadBuffer", 3, 1, 1, 1, 1, 0, \
BufferAccess(kExternal##Type##Array)) {} \
}; \
- struct StoreBuffer##Type##Operator FINAL : public Operator1<BufferAccess> { \
+ struct StoreBuffer##Type##Operator final : public Operator1<BufferAccess> { \
StoreBuffer##Type##Operator() \
: Operator1<BufferAccess>(IrOpcode::kStoreBuffer, \
Operator::kNoRead | Operator::kNoThrow, \
@@ -228,10 +245,11 @@
: cache_(kCache.Get()), zone_(zone) {}
-#define PURE(Name, properties, input_count) \
+#define GET_FROM_CACHE(Name, properties, input_count) \
const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
-PURE_OP_LIST(PURE)
-#undef PURE
+PURE_OP_LIST(GET_FROM_CACHE)
+NO_THROW_OP_LIST(GET_FROM_CACHE)
+#undef GET_FROM_CACHE
const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
@@ -242,6 +260,13 @@
}
+const Operator* SimplifiedOperatorBuilder::Allocate(PretenureFlag pretenure) {
+ return new (zone())
+ Operator1<PretenureFlag>(IrOpcode::kAllocate, Operator::kNoThrow,
+ "Allocate", 1, 1, 1, 1, 1, 0, pretenure);
+}
+
+
const Operator* SimplifiedOperatorBuilder::LoadBuffer(BufferAccess access) {
switch (access.external_array_type()) {
#define LOAD_BUFFER(Type, type, TYPE, ctype, size) \
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index 22664fa..3821a6d 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -7,8 +7,9 @@
#include <iosfwd>
-#include "src/compiler/machine-type.h"
#include "src/handles.h"
+#include "src/machine-type.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
@@ -34,7 +35,7 @@
// An access descriptor for loads/stores of array buffers.
-class BufferAccess FINAL {
+class BufferAccess final {
public:
explicit BufferAccess(ExternalArrayType external_array_type)
: external_array_type_(external_array_type) {}
@@ -124,12 +125,10 @@
// - Bool: a tagged pointer to either the canonical JS #false or
// the canonical JS #true object
// - Bit: an untagged integer 0 or 1, but word-sized
-class SimplifiedOperatorBuilder FINAL {
+class SimplifiedOperatorBuilder final : public ZoneObject {
public:
explicit SimplifiedOperatorBuilder(Zone* zone);
- const Operator* AnyToBoolean();
-
const Operator* BooleanNot();
const Operator* BooleanToNumber();
@@ -141,15 +140,23 @@
const Operator* NumberMultiply();
const Operator* NumberDivide();
const Operator* NumberModulus();
+ const Operator* NumberBitwiseOr();
+ const Operator* NumberBitwiseXor();
+ const Operator* NumberBitwiseAnd();
+ const Operator* NumberShiftLeft();
+ const Operator* NumberShiftRight();
+ const Operator* NumberShiftRightLogical();
const Operator* NumberToInt32();
const Operator* NumberToUint32();
+ const Operator* NumberIsHoleNaN();
+
+ const Operator* PlainPrimitiveToNumber();
const Operator* ReferenceEqual(Type* type);
const Operator* StringEqual();
const Operator* StringLessThan();
const Operator* StringLessThanOrEqual();
- const Operator* StringAdd();
const Operator* ChangeTaggedToInt32();
const Operator* ChangeTaggedToUint32();
@@ -160,8 +167,10 @@
const Operator* ChangeBoolToBit();
const Operator* ChangeBitToBool();
+ const Operator* ObjectIsNumber();
const Operator* ObjectIsSmi();
- const Operator* ObjectIsNonNegativeSmi();
+
+ const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
const Operator* LoadField(FieldAccess const&);
const Operator* StoreField(FieldAccess const&);
diff --git a/src/compiler/source-position.cc b/src/compiler/source-position.cc
index 9e21ae4..48361ec 100644
--- a/src/compiler/source-position.cc
+++ b/src/compiler/source-position.cc
@@ -4,19 +4,18 @@
#include "src/compiler/source-position.h"
#include "src/compiler/graph.h"
-#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-aux-data.h"
namespace v8 {
namespace internal {
namespace compiler {
-class SourcePositionTable::Decorator FINAL : public GraphDecorator {
+class SourcePositionTable::Decorator final : public GraphDecorator {
public:
explicit Decorator(SourcePositionTable* source_positions)
: source_positions_(source_positions) {}
- void Decorate(Node* node) FINAL {
- DCHECK(!source_positions_->current_position_.IsInvalid());
+ void Decorate(Node* node) final {
source_positions_->table_.Set(node, source_positions_->current_position_);
}
@@ -27,22 +26,22 @@
SourcePositionTable::SourcePositionTable(Graph* graph)
: graph_(graph),
- decorator_(NULL),
- current_position_(SourcePosition::Invalid()),
+ decorator_(nullptr),
+ current_position_(SourcePosition::Unknown()),
table_(graph->zone()) {}
void SourcePositionTable::AddDecorator() {
- DCHECK(decorator_ == NULL);
+ DCHECK_NULL(decorator_);
decorator_ = new (graph_->zone()) Decorator(this);
graph_->AddDecorator(decorator_);
}
void SourcePositionTable::RemoveDecorator() {
- DCHECK(decorator_ != NULL);
+ DCHECK_NOT_NULL(decorator_);
graph_->RemoveDecorator(decorator_);
- decorator_ = NULL;
+ decorator_ = nullptr;
}
@@ -50,6 +49,24 @@
return table_.Get(node);
}
+
+void SourcePositionTable::Print(std::ostream& os) const {
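+  // Prints a JSON-style map from node to raw source position, skipping
+  // nodes whose position is unknown.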
+ os << "{";
+ bool needs_comma = false;
+ for (auto i : table_) {
+ SourcePosition pos = i.second;
+ if (pos.IsKnown()) {
+ if (needs_comma) {
+ os << ",";
+ }
+ os << "\"" << i.first << "\""
+ << ":" << pos.raw();
+ needs_comma = true;
+ }
+ }
+ os << "}";
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/source-position.h b/src/compiler/source-position.h
index 390a17d..81db1d2 100644
--- a/src/compiler/source-position.h
+++ b/src/compiler/source-position.h
@@ -14,22 +14,18 @@
// Encapsulates encoding and decoding of sources positions from which Nodes
// originated.
-class SourcePosition FINAL {
+class SourcePosition final {
public:
explicit SourcePosition(int raw = kUnknownPosition) : raw_(raw) {}
static SourcePosition Unknown() { return SourcePosition(kUnknownPosition); }
bool IsUnknown() const { return raw() == kUnknownPosition; }
-
- static SourcePosition Invalid() { return SourcePosition(kInvalidPosition); }
- bool IsInvalid() const { return raw() == kInvalidPosition; }
+ bool IsKnown() const { return raw() != kUnknownPosition; }
int raw() const { return raw_; }
private:
- static const int kInvalidPosition = -2;
static const int kUnknownPosition = RelocInfo::kNoPosition;
- STATIC_ASSERT(kInvalidPosition != kUnknownPosition);
int raw_;
};
@@ -43,9 +39,9 @@
}
-class SourcePositionTable FINAL {
+class SourcePositionTable final {
public:
- class Scope {
+ class Scope final {
public:
Scope(SourcePositionTable* source_positions, SourcePosition position)
: source_positions_(source_positions),
@@ -61,19 +57,17 @@
private:
void Init(SourcePosition position) {
- if (!position.IsUnknown() || prev_position_.IsInvalid()) {
- source_positions_->current_position_ = position;
- }
+ if (position.IsKnown()) source_positions_->current_position_ = position;
}
- SourcePositionTable* source_positions_;
- SourcePosition prev_position_;
+ SourcePositionTable* const source_positions_;
+ SourcePosition const prev_position_;
DISALLOW_COPY_AND_ASSIGN(Scope);
};
explicit SourcePositionTable(Graph* graph);
~SourcePositionTable() {
- if (decorator_ != NULL) RemoveDecorator();
+ if (decorator_) RemoveDecorator();
}
void AddDecorator();
@@ -81,10 +75,12 @@
SourcePosition GetSourcePosition(Node* node) const;
+ void Print(std::ostream& os) const;
+
private:
class Decorator;
- Graph* graph_;
+ Graph* const graph_;
Decorator* decorator_;
SourcePosition current_position_;
NodeAuxData<SourcePosition> table_;
@@ -96,4 +92,4 @@
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_SOURCE_POSITION_H_
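
As a reading aid, here is a minimal sketch of how a compiler phase would use
the Scope helper above; the function and its arguments are hypothetical, only
SourcePositionTable, Scope and SourcePosition come from this file:

  // Hypothetical usage sketch. While |scope| is alive, the table's Decorator
  // tags every newly created graph node with |position|; since Scope stores
  // prev_position_, the previous position is presumably restored when the
  // scope is destroyed.
  void BuildExpression(SourcePositionTable* table, SourcePosition position) {
    SourcePositionTable::Scope scope(table, position);
    // ... create graph nodes here; each one is tagged with |position| ...
  }
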
diff --git a/src/compiler/state-values-utils.cc b/src/compiler/state-values-utils.cc
new file mode 100644
index 0000000..77cc227
--- /dev/null
+++ b/src/compiler/state-values-utils.cc
@@ -0,0 +1,317 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/state-values-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+StateValuesCache::StateValuesCache(JSGraph* js_graph)
+ : js_graph_(js_graph),
+ hash_map_(AreKeysEqual, ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone())),
+ working_space_(zone()),
+ empty_state_values_(nullptr) {}
+
+
+// static
+bool StateValuesCache::AreKeysEqual(void* key1, void* key2) {
+ NodeKey* node_key1 = reinterpret_cast<NodeKey*>(key1);
+ NodeKey* node_key2 = reinterpret_cast<NodeKey*>(key2);
+
+ if (node_key1->node == nullptr) {
+ if (node_key2->node == nullptr) {
+ return AreValueKeysEqual(reinterpret_cast<StateValuesKey*>(key1),
+ reinterpret_cast<StateValuesKey*>(key2));
+ } else {
+ return IsKeysEqualToNode(reinterpret_cast<StateValuesKey*>(key1),
+ node_key2->node);
+ }
+ } else {
+ if (node_key2->node == nullptr) {
+ // If the nodes are already processed, they must be the same.
+ return IsKeysEqualToNode(reinterpret_cast<StateValuesKey*>(key2),
+ node_key1->node);
+ } else {
+ return node_key1->node == node_key2->node;
+ }
+ }
+ UNREACHABLE();
+}
+
+
+// static
+bool StateValuesCache::IsKeysEqualToNode(StateValuesKey* key, Node* node) {
+ if (key->count != static_cast<size_t>(node->InputCount())) {
+ return false;
+ }
+ for (size_t i = 0; i < key->count; i++) {
+ if (key->values[i] != node->InputAt(static_cast<int>(i))) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+// static
+bool StateValuesCache::AreValueKeysEqual(StateValuesKey* key1,
+ StateValuesKey* key2) {
+ if (key1->count != key2->count) {
+ return false;
+ }
+ for (size_t i = 0; i < key1->count; i++) {
+ if (key1->values[i] != key2->values[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+Node* StateValuesCache::GetEmptyStateValues() {
+ if (empty_state_values_ == nullptr) {
+ empty_state_values_ = graph()->NewNode(common()->StateValues(0));
+ }
+ return empty_state_values_;
+}
+
+
+NodeVector* StateValuesCache::GetWorkingSpace(size_t level) {
+ while (working_space_.size() <= level) {
+ void* space = zone()->New(sizeof(NodeVector));
+ working_space_.push_back(new (space)
+ NodeVector(kMaxInputCount, nullptr, zone()));
+ }
+ return working_space_[level];
+}
+
+namespace {
+
+int StateValuesHashKey(Node** nodes, size_t count) {
+ size_t hash = count;
+ for (size_t i = 0; i < count; i++) {
+ hash = hash * 23 + nodes[i]->id();
+ }
+ return static_cast<int>(hash & 0x7fffffff);
+}
+
+} // namespace
+
+
+Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count) {
+ StateValuesKey key(count, nodes);
+ int hash = StateValuesHashKey(nodes, count);
+ ZoneHashMap::Entry* lookup =
+ hash_map_.LookupOrInsert(&key, hash, ZoneAllocationPolicy(zone()));
+ DCHECK_NOT_NULL(lookup);
+ Node* node;
+ if (lookup->value == nullptr) {
+ int input_count = static_cast<int>(count);
+ node = graph()->NewNode(common()->StateValues(input_count), input_count,
+ nodes);
+ NodeKey* new_key = new (zone()->New(sizeof(NodeKey))) NodeKey(node);
+ lookup->key = new_key;
+ lookup->value = node;
+ } else {
+ node = reinterpret_cast<Node*>(lookup->value);
+ }
+ return node;
+}
+
+
+class StateValuesCache::ValueArrayIterator {
+ public:
+ ValueArrayIterator(Node** values, size_t count)
+ : values_(values), count_(count), current_(0) {}
+
+ void Advance() {
+ if (!done()) {
+ current_++;
+ }
+ }
+
+ bool done() { return current_ >= count_; }
+
+ Node* node() {
+ DCHECK(!done());
+ return values_[current_];
+ }
+
+ private:
+ Node** values_;
+ size_t count_;
+ size_t current_;
+};
+
+
+Node* StateValuesCache::BuildTree(ValueArrayIterator* it, size_t max_height) {
+ if (max_height == 0) {
+ Node* node = it->node();
+ it->Advance();
+ return node;
+ }
+ DCHECK(!it->done());
+
+ NodeVector* buffer = GetWorkingSpace(max_height);
+ size_t count = 0;
+ for (; count < kMaxInputCount; count++) {
+ if (it->done()) break;
+ (*buffer)[count] = BuildTree(it, max_height - 1);
+ }
+ if (count == 1) {
+ return (*buffer)[0];
+ } else {
+ return GetValuesNodeFromCache(&(buffer->front()), count);
+ }
+}
+
+
+Node* StateValuesCache::GetNodeForValues(Node** values, size_t count) {
+#if DEBUG
+ for (size_t i = 0; i < count; i++) {
+ DCHECK_NE(values[i]->opcode(), IrOpcode::kStateValues);
+ DCHECK_NE(values[i]->opcode(), IrOpcode::kTypedStateValues);
+ }
+#endif
+ if (count == 0) {
+ return GetEmptyStateValues();
+ }
+ size_t height = 0;
+ size_t max_nodes = 1;
+ while (count > max_nodes) {
+ height++;
+ max_nodes *= kMaxInputCount;
+ }
+
+ ValueArrayIterator it(values, count);
+
+ Node* tree = BuildTree(&it, height);
+
+ // If the 'tree' is a single node, equip it with a StateValues wrapper.
+ if (tree->opcode() != IrOpcode::kStateValues &&
+ tree->opcode() != IrOpcode::kTypedStateValues) {
+ tree = GetValuesNodeFromCache(&tree, 1);
+ }
+
+ return tree;
+}
+
+
+StateValuesAccess::iterator::iterator(Node* node) : current_depth_(0) {
+ // A hacky way to initialize - just set the index before the node we want
+ // to process and then advance to it.
+ stack_[current_depth_].node = node;
+ stack_[current_depth_].index = -1;
+ Advance();
+}
+
+
+StateValuesAccess::iterator::StatePos* StateValuesAccess::iterator::Top() {
+ DCHECK(current_depth_ >= 0);
+ DCHECK(current_depth_ < kMaxInlineDepth);
+ return &(stack_[current_depth_]);
+}
+
+
+void StateValuesAccess::iterator::Push(Node* node) {
+ current_depth_++;
+ CHECK(current_depth_ < kMaxInlineDepth);
+ stack_[current_depth_].node = node;
+ stack_[current_depth_].index = 0;
+}
+
+
+void StateValuesAccess::iterator::Pop() {
+ DCHECK(current_depth_ >= 0);
+ current_depth_--;
+}
+
+
+bool StateValuesAccess::iterator::done() { return current_depth_ < 0; }
+
+
+void StateValuesAccess::iterator::Advance() {
+ // Advance the current index.
+ Top()->index++;
+
+ // Fix up the position to point to a valid node.
+ while (true) {
+ // TODO(jarin): Factor to a separate method.
+ Node* node = Top()->node;
+ int index = Top()->index;
+
+ if (index >= node->InputCount()) {
+ // Pop stack and move to the next sibling.
+ Pop();
+ if (done()) {
+ // Stack is exhausted, we have reached the end.
+ return;
+ }
+ Top()->index++;
+ } else if (node->InputAt(index)->opcode() == IrOpcode::kStateValues ||
+ node->InputAt(index)->opcode() == IrOpcode::kTypedStateValues) {
+ // Nested state, we need to push to the stack.
+ Push(node->InputAt(index));
+ } else {
+ // We are on a valid node, we can stop the iteration.
+ return;
+ }
+ }
+}
+
+
+Node* StateValuesAccess::iterator::node() {
+ return Top()->node->InputAt(Top()->index);
+}
+
+
+MachineType StateValuesAccess::iterator::type() {
+ Node* state = Top()->node;
+ if (state->opcode() == IrOpcode::kStateValues) {
+ return MachineType::AnyTagged();
+ } else {
+ DCHECK_EQ(IrOpcode::kTypedStateValues, state->opcode());
+ const ZoneVector<MachineType>* types =
+ OpParameter<const ZoneVector<MachineType>*>(state);
+ return (*types)[Top()->index];
+ }
+}
+
+
+bool StateValuesAccess::iterator::operator!=(iterator& other) {
+ // We only allow comparison with end().
+ CHECK(other.done());
+ return !done();
+}
+
+
+StateValuesAccess::iterator& StateValuesAccess::iterator::operator++() {
+ Advance();
+ return *this;
+}
+
+
+StateValuesAccess::TypedNode StateValuesAccess::iterator::operator*() {
+ return TypedNode(node(), type());
+}
+
+
+size_t StateValuesAccess::size() {
+ size_t count = 0;
+ for (int i = 0; i < node_->InputCount(); i++) {
+ if (node_->InputAt(i)->opcode() == IrOpcode::kStateValues ||
+ node_->InputAt(i)->opcode() == IrOpcode::kTypedStateValues) {
+ count += StateValuesAccess(node_->InputAt(i)).size();
+ } else {
+ count++;
+ }
+ }
+ return count;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
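
The height computation in GetNodeForValues above picks the smallest h with
kMaxInputCount^h >= count, and BuildTree then fills at most kMaxInputCount
subtrees per level. A self-contained sketch of just that arithmetic (the
function name is made up for illustration):

  #include <cstddef>

  // Smallest tree height h such that max_input_count^h >= count, mirroring
  // the while loop in StateValuesCache::GetNodeForValues.
  size_t TreeHeightForCount(size_t count, size_t max_input_count) {
    size_t height = 0;
    size_t max_nodes = 1;
    while (count > max_nodes) {
      height++;
      max_nodes *= max_input_count;
    }
    return height;
  }

  // With kMaxInputCount == 8: TreeHeightForCount(1, 8) == 0,
  // TreeHeightForCount(8, 8) == 1, and TreeHeightForCount(100, 8) == 3
  // (8^2 = 64 < 100 <= 512 = 8^3).
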
diff --git a/src/compiler/state-values-utils.h b/src/compiler/state-values-utils.h
new file mode 100644
index 0000000..79550bd
--- /dev/null
+++ b/src/compiler/state-values-utils.h
@@ -0,0 +1,120 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_STATE_VALUES_UTILS_H_
+#define V8_COMPILER_STATE_VALUES_UTILS_H_
+
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+
+class Graph;
+
+class StateValuesCache {
+ public:
+ explicit StateValuesCache(JSGraph* js_graph);
+
+ Node* GetNodeForValues(Node** values, size_t count);
+
+ private:
+ static const size_t kMaxInputCount = 8;
+
+ struct NodeKey {
+ Node* node;
+
+ explicit NodeKey(Node* node) : node(node) {}
+ };
+
+ struct StateValuesKey : public NodeKey {
+ // ValueArray - array of nodes ({node} has to be nullptr).
+ size_t count;
+ Node** values;
+
+ StateValuesKey(size_t count, Node** values)
+ : NodeKey(nullptr), count(count), values(values) {}
+ };
+
+ class ValueArrayIterator;
+
+ static bool AreKeysEqual(void* key1, void* key2);
+ static bool IsKeysEqualToNode(StateValuesKey* key, Node* node);
+ static bool AreValueKeysEqual(StateValuesKey* key1, StateValuesKey* key2);
+
+ Node* BuildTree(ValueArrayIterator* it, size_t max_height);
+ NodeVector* GetWorkingSpace(size_t level);
+ Node* GetEmptyStateValues();
+ Node* GetValuesNodeFromCache(Node** nodes, size_t count);
+
+ Graph* graph() { return js_graph_->graph(); }
+ CommonOperatorBuilder* common() { return js_graph_->common(); }
+
+ Zone* zone() { return graph()->zone(); }
+
+ JSGraph* js_graph_;
+ ZoneHashMap hash_map_;
+ ZoneVector<NodeVector*> working_space_; // One working space per level.
+ Node* empty_state_values_;
+};
+
+class StateValuesAccess {
+ public:
+ struct TypedNode {
+ Node* node;
+ MachineType type;
+ TypedNode(Node* node, MachineType type) : node(node), type(type) {}
+ };
+
+ class iterator {
+ public:
+ // Bare minimum of operators needed for range iteration.
+ bool operator!=(iterator& other);
+ iterator& operator++();
+ TypedNode operator*();
+
+ private:
+ friend class StateValuesAccess;
+
+ iterator() : current_depth_(-1) {}
+ explicit iterator(Node* node);
+
+ Node* node();
+ MachineType type();
+ bool done();
+ void Advance();
+
+ struct StatePos {
+ Node* node;
+ int index;
+
+ explicit StatePos(Node* node) : node(node), index(0) {}
+ StatePos() {}
+ };
+
+ StatePos* Top();
+ void Push(Node* node);
+ void Pop();
+
+ static const int kMaxInlineDepth = 8;
+ StatePos stack_[kMaxInlineDepth];
+ int current_depth_;
+ };
+
+ explicit StateValuesAccess(Node* node) : node_(node) {}
+
+ size_t size();
+ iterator begin() { return iterator(node_); }
+ iterator end() { return iterator(); }
+
+ private:
+ Node* node_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_STATE_VALUES_UTILS_H_
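
StateValuesAccess flattens an arbitrarily nested StateValues/TypedStateValues
tree back into its leaf values. A hedged sketch of the intended call pattern,
assuming the surrounding compiler headers; CollectLeaves and its vector
parameter are illustrative, not part of this patch:

  #include <vector>

  // Walk all leaves of a (possibly nested) state values tree via the range
  // interface declared above.
  void CollectLeaves(Node* state_values, std::vector<Node*>* leaves) {
    for (StateValuesAccess::TypedNode entry :
         StateValuesAccess(state_values)) {
      // entry.type is AnyTagged for StateValues inputs and the recorded
      // machine type for TypedStateValues inputs.
      leaves->push_back(entry.node);
    }
  }
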
diff --git a/src/compiler/tail-call-optimization.cc b/src/compiler/tail-call-optimization.cc
new file mode 100644
index 0000000..6635fb9
--- /dev/null
+++ b/src/compiler/tail-call-optimization.cc
@@ -0,0 +1,83 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/tail-call-optimization.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction TailCallOptimization::Reduce(Node* node) {
+ if (node->opcode() != IrOpcode::kReturn) return NoChange();
+ // The value which is returned must be the result of a potential tail call;
+ // there must be no try/catch/finally around the Call, and there must be no
+ // other effect between the Call and the Return nodes.
+ Node* const call = NodeProperties::GetValueInput(node, 0);
+ if (call->opcode() == IrOpcode::kCall &&
+ OpParameter<CallDescriptor const*>(call)->SupportsTailCalls() &&
+ NodeProperties::GetEffectInput(node) == call &&
+ !NodeProperties::IsExceptionalCall(call)) {
+ Node* const control = NodeProperties::GetControlInput(node);
+ if (control->opcode() == IrOpcode::kIfSuccess &&
+ call->OwnedBy(node, control) && control->OwnedBy(node)) {
+ // Furthermore, control has to flow via an IfSuccess from the Call, so
+ // the Return node's value and effect depend directly on the Call node,
+ // and its control depends on the Call indirectly via the IfSuccess.
+
+ // Value1 ... ValueN Effect Control
+ //   ^          ^      ^       ^
+ //   |          |      |       |
+ //   |          +--+ +-+       |
+ //   +----------+  | |  +------+
+ //               \ | | /
+ //            Call[Descriptor]
+ //               ^ ^ ^
+ //               | | |
+ //             +-+ | |
+ //             |   | |
+ //             | +-+ |
+ //             | | IfSuccess
+ //             | |  ^
+ //             | |  |
+ //             Return
+ //               ^
+ //               |
+
+ // The resulting graph looks like this:
+
+ // Value1 ... ValueN Effect Control
+ //   ^          ^      ^       ^
+ //   |          |      |       |
+ //   |          +--+ +-+       |
+ //   +----------+  | |  +------+
+ //               \ | | /
+ //        TailCall[Descriptor]
+ //                 ^
+ //                 |
+
+ DCHECK_EQ(call, NodeProperties::GetControlInput(control, 0));
+ DCHECK_EQ(3, node->InputCount());
+ node->ReplaceInput(0, NodeProperties::GetEffectInput(call));
+ node->ReplaceInput(1, NodeProperties::GetControlInput(call));
+ node->RemoveInput(2);
+ for (int index = 0; index < call->op()->ValueInputCount(); ++index) {
+ node->InsertInput(graph()->zone(), index,
+ NodeProperties::GetValueInput(call, index));
+ }
+ NodeProperties::ChangeOp(
+ node, common()->TailCall(OpParameter<CallDescriptor const*>(call)));
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/tail-call-optimization.h b/src/compiler/tail-call-optimization.h
new file mode 100644
index 0000000..b5d4f96
--- /dev/null
+++ b/src/compiler/tail-call-optimization.h
@@ -0,0 +1,40 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
+#define V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class Graph;
+
+
+// Performs tail call optimization by replacing certain combinations of Return
+// and Call nodes with a single TailCall.
+class TailCallOptimization final : public Reducer {
+ public:
+ TailCallOptimization(CommonOperatorBuilder* common, Graph* graph)
+ : common_(common), graph_(graph) {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ CommonOperatorBuilder* common() const { return common_; }
+ Graph* graph() const { return graph_; }
+
+ CommonOperatorBuilder* const common_;
+ Graph* const graph_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
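
Like the Typer's visitor later in this patch, the reducer is meant to be
driven by a GraphReducer. A minimal wiring sketch; the driver function is
hypothetical, while the GraphReducer calls match those used in typer.cc below:

  // After ReduceGraph(), eligible Return(Call) pairs have been rewritten
  // into TailCall nodes.
  void RunTailCallOptimization(Zone* zone, Graph* graph,
                               CommonOperatorBuilder* common) {
    GraphReducer graph_reducer(zone, graph);
    TailCallOptimization tco(common, graph);
    graph_reducer.AddReducer(&tco);
    graph_reducer.ReduceGraph();
  }
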
diff --git a/src/compiler/type-hint-analyzer.cc b/src/compiler/type-hint-analyzer.cc
new file mode 100644
index 0000000..42c4627
--- /dev/null
+++ b/src/compiler/type-hint-analyzer.cc
@@ -0,0 +1,98 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/type-hint-analyzer.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/type-hints.h"
+#include "src/ic/ic-state.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// TODO(bmeurer): This detour via types is ugly.
+BinaryOperationHints::Hint ToHint(Type* type) {
+ if (type->Is(Type::None())) return BinaryOperationHints::kNone;
+ if (type->Is(Type::SignedSmall())) return BinaryOperationHints::kSignedSmall;
+ if (type->Is(Type::Signed32())) return BinaryOperationHints::kSigned32;
+ if (type->Is(Type::Number())) return BinaryOperationHints::kNumber;
+ if (type->Is(Type::String())) return BinaryOperationHints::kString;
+ return BinaryOperationHints::kAny;
+}
+
+} // namespace
+
+
+bool TypeHintAnalysis::GetBinaryOperationHints(
+ TypeFeedbackId id, BinaryOperationHints* hints) const {
+ auto i = infos_.find(id);
+ if (i == infos_.end()) return false;
+ Handle<Code> code = i->second;
+ DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
+ BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
+ *hints = BinaryOperationHints(ToHint(state.GetLeftType()),
+ ToHint(state.GetRightType()),
+ ToHint(state.GetResultType()));
+ return true;
+}
+
+
+bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
+ ToBooleanHints* hints) const {
+ auto i = infos_.find(id);
+ if (i == infos_.end()) return false;
+ Handle<Code> code = i->second;
+ DCHECK_EQ(Code::TO_BOOLEAN_IC, code->kind());
+ ToBooleanStub stub(code->GetIsolate(), code->extra_ic_state());
+// TODO(bmeurer): Replace ToBooleanStub::Types with ToBooleanHints.
+#define ASSERT_COMPATIBLE(NAME, Name) \
+ STATIC_ASSERT(1 << ToBooleanStub::NAME == \
+ static_cast<int>(ToBooleanHint::k##Name))
+ ASSERT_COMPATIBLE(UNDEFINED, Undefined);
+ ASSERT_COMPATIBLE(BOOLEAN, Boolean);
+ ASSERT_COMPATIBLE(NULL_TYPE, Null);
+ ASSERT_COMPATIBLE(SMI, SmallInteger);
+ ASSERT_COMPATIBLE(SPEC_OBJECT, Receiver);
+ ASSERT_COMPATIBLE(STRING, String);
+ ASSERT_COMPATIBLE(SYMBOL, Symbol);
+ ASSERT_COMPATIBLE(HEAP_NUMBER, HeapNumber);
+ ASSERT_COMPATIBLE(SIMD_VALUE, SimdValue);
+#undef ASSERT_COMPATIBLE
+ *hints = ToBooleanHints(stub.types().ToIntegral());
+ return true;
+}
+
+
+TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
+ DisallowHeapAllocation no_gc;
+ TypeHintAnalysis::Infos infos(zone());
+ Isolate* const isolate = code->GetIsolate();
+ int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
+ for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ switch (target->kind()) {
+ case Code::BINARY_OP_IC:
+ case Code::TO_BOOLEAN_IC: {
+ // Add this feedback to the {infos}.
+ TypeFeedbackId id(static_cast<unsigned>(rinfo->data()));
+ infos.insert(std::make_pair(id, handle(target, isolate)));
+ break;
+ }
+ default:
+ // Ignore the remaining code objects.
+ break;
+ }
+ }
+ return new (zone()) TypeHintAnalysis(infos);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/type-hint-analyzer.h b/src/compiler/type-hint-analyzer.h
new file mode 100644
index 0000000..1a79905
--- /dev/null
+++ b/src/compiler/type-hint-analyzer.h
@@ -0,0 +1,51 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPE_HINT_ANALYZER_H_
+#define V8_COMPILER_TYPE_HINT_ANALYZER_H_
+
+#include "src/compiler/type-hints.h"
+#include "src/handles.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// The result of analyzing type hints.
+class TypeHintAnalysis final : public ZoneObject {
+ public:
+ typedef ZoneMap<TypeFeedbackId, Handle<Code>> Infos;
+
+ explicit TypeHintAnalysis(Infos const& infos) : infos_(infos) {}
+
+ bool GetBinaryOperationHints(TypeFeedbackId id,
+ BinaryOperationHints* hints) const;
+ bool GetToBooleanHints(TypeFeedbackId id, ToBooleanHints* hints) const;
+
+ private:
+ Infos const infos_;
+};
+
+
+// The class that performs type hint analysis on the fullcodegen code object.
+class TypeHintAnalyzer final {
+ public:
+ explicit TypeHintAnalyzer(Zone* zone) : zone_(zone) {}
+
+ TypeHintAnalysis* Analyze(Handle<Code> code);
+
+ private:
+ Zone* zone() const { return zone_; }
+
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeHintAnalyzer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_TYPE_HINT_ANALYZER_H_
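
The header implies a two-step protocol: run Analyze once over the fullcodegen
code object, then query hints by feedback id while building the graph. A
hedged sketch; the driver function and feedback id are stand-ins:

  // Illustrative only: query binary-op feedback recorded by fullcodegen.
  void UseTypeHints(Zone* zone, Handle<Code> code, TypeFeedbackId id) {
    TypeHintAnalyzer analyzer(zone);
    TypeHintAnalysis* analysis = analyzer.Analyze(code);
    BinaryOperationHints hints;
    if (analysis->GetBinaryOperationHints(id, &hints)) {
      // hints.left(), hints.right() and hints.result() now bound the
      // operand and result representations seen so far.
    }
  }
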
diff --git a/src/compiler/type-hints.cc b/src/compiler/type-hints.cc
new file mode 100644
index 0000000..06abad6
--- /dev/null
+++ b/src/compiler/type-hints.cc
@@ -0,0 +1,83 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/type-hints.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+std::ostream& operator<<(std::ostream& os, BinaryOperationHints::Hint hint) {
+ switch (hint) {
+ case BinaryOperationHints::kNone:
+ return os << "None";
+ case BinaryOperationHints::kSignedSmall:
+ return os << "SignedSmall";
+ case BinaryOperationHints::kSigned32:
+ return os << "Signed32";
+ case BinaryOperationHints::kNumber:
+ return os << "Number";
+ case BinaryOperationHints::kString:
+ return os << "String";
+ case BinaryOperationHints::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, BinaryOperationHints hints) {
+ return os << hints.left() << "*" << hints.right() << "->" << hints.result();
+}
+
+
+std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
+ switch (hint) {
+ case ToBooleanHint::kNone:
+ return os << "None";
+ case ToBooleanHint::kUndefined:
+ return os << "Undefined";
+ case ToBooleanHint::kBoolean:
+ return os << "Boolean";
+ case ToBooleanHint::kNull:
+ return os << "Null";
+ case ToBooleanHint::kSmallInteger:
+ return os << "SmallInteger";
+ case ToBooleanHint::kReceiver:
+ return os << "Receiver";
+ case ToBooleanHint::kString:
+ return os << "String";
+ case ToBooleanHint::kSymbol:
+ return os << "Symbol";
+ case ToBooleanHint::kHeapNumber:
+ return os << "HeapNumber";
+ case ToBooleanHint::kSimdValue:
+ return os << "SimdValue";
+ case ToBooleanHint::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
+ if (hints == ToBooleanHint::kAny) return os << "Any";
+ if (hints == ToBooleanHint::kNone) return os << "None";
+ bool first = true;
+ for (ToBooleanHints::mask_type i = 0; i < sizeof(i) * CHAR_BIT; ++i) {
+ ToBooleanHint const hint = static_cast<ToBooleanHint>(1u << i);
+ if (hints & hint) {
+ if (!first) os << "|";
+ first = false;
+ os << hint;
+ }
+ }
+ return os;
+}
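+// (Illustrative note: bits are emitted from least to most significant, so
+// e.g. (ToBooleanHint::kBoolean | ToBooleanHint::kString) prints as
+// "Boolean|String".)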
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/type-hints.h b/src/compiler/type-hints.h
new file mode 100644
index 0000000..f1cc640
--- /dev/null
+++ b/src/compiler/type-hints.h
@@ -0,0 +1,84 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPE_HINTS_H_
+#define V8_COMPILER_TYPE_HINTS_H_
+
+#include "src/base/flags.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Type hints for a binary operation.
+class BinaryOperationHints final {
+ public:
+ enum Hint { kNone, kSignedSmall, kSigned32, kNumber, kString, kAny };
+
+ BinaryOperationHints() : BinaryOperationHints(kNone, kNone, kNone) {}
+ BinaryOperationHints(Hint left, Hint right, Hint result)
+ : bit_field_(LeftField::encode(left) | RightField::encode(right) |
+ ResultField::encode(result)) {}
+
+ static BinaryOperationHints Any() {
+ return BinaryOperationHints(kAny, kAny, kAny);
+ }
+
+ Hint left() const { return LeftField::decode(bit_field_); }
+ Hint right() const { return RightField::decode(bit_field_); }
+ Hint result() const { return ResultField::decode(bit_field_); }
+
+ bool operator==(BinaryOperationHints const& that) const {
+ return this->bit_field_ == that.bit_field_;
+ }
+ bool operator!=(BinaryOperationHints const& that) const {
+ return !(*this == that);
+ }
+
+ friend size_t hash_value(BinaryOperationHints const& hints) {
+ return hints.bit_field_;
+ }
+
+ private:
+ typedef BitField<Hint, 0, 3> LeftField;
+ typedef BitField<Hint, 3, 3> RightField;
+ typedef BitField<Hint, 6, 3> ResultField;
+
+ uint32_t bit_field_;
+};
+
+std::ostream& operator<<(std::ostream&, BinaryOperationHints::Hint);
+std::ostream& operator<<(std::ostream&, BinaryOperationHints);
+
+
+// Type hints for the ToBoolean type conversion.
+enum class ToBooleanHint : uint16_t {
+ kNone = 0u,
+ kUndefined = 1u << 0,
+ kBoolean = 1u << 1,
+ kNull = 1u << 2,
+ kSmallInteger = 1u << 3,
+ kReceiver = 1u << 4,
+ kString = 1u << 5,
+ kSymbol = 1u << 6,
+ kHeapNumber = 1u << 7,
+ kSimdValue = 1u << 8,
+ kAny = kUndefined | kBoolean | kNull | kSmallInteger | kReceiver | kString |
+ kSymbol | kHeapNumber | kSimdValue
+};
+
+std::ostream& operator<<(std::ostream&, ToBooleanHint);
+
+typedef base::Flags<ToBooleanHint, uint16_t> ToBooleanHints;
+
+std::ostream& operator<<(std::ostream&, ToBooleanHints);
+
+DEFINE_OPERATORS_FOR_FLAGS(ToBooleanHints)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_TYPE_HINTS_H_
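
The BitField typedefs pack the three hints into the low nine bits of
bit_field_. A standalone re-implementation of just that packing, deliberately
bypassing V8's BitField template to make the layout concrete:

  #include <cstdint>

  enum Hint : uint32_t { kNone, kSignedSmall, kSigned32, kNumber, kString,
                         kAny };

  // Left in bits 0-2, right in bits 3-5, result in bits 6-8, mirroring
  // BitField<Hint, 0, 3>, BitField<Hint, 3, 3> and BitField<Hint, 6, 3>.
  uint32_t EncodeHints(Hint left, Hint right, Hint result) {
    return left | (right << 3) | (result << 6);
  }

  Hint DecodeLeft(uint32_t bits) { return static_cast<Hint>(bits & 0x7); }

  // EncodeHints(kSigned32, kNumber, kAny) == 2 | (3 << 3) | (5 << 6) == 346.
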
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index 137829e..c1f816d 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -2,220 +2,71 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/typer.h"
+
+#include "src/base/flags.h"
#include "src/bootstrapper.h"
-#include "src/compiler/graph-inl.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/compiler/typer.h"
+#include "src/objects-inl.h"
+#include "src/type-cache.h"
namespace v8 {
namespace internal {
namespace compiler {
-#define NATIVE_TYPES(V) \
- V(Int8) \
- V(Uint8) \
- V(Int16) \
- V(Uint16) \
- V(Int32) \
- V(Uint32) \
- V(Float32) \
- V(Float64)
-
-enum LazyCachedType {
- kNumberFunc0,
- kNumberFunc1,
- kNumberFunc2,
- kImulFunc,
- kClz32Func,
- kArrayBufferFunc,
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- k##Type, k##Type##Array, k##Type##ArrayFunc,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- kNumLazyCachedTypes
-};
-
-
-// Constructs and caches types lazily.
-// TODO(turbofan): these types could be globally cached or cached per isolate.
-class LazyTypeCache FINAL : public ZoneObject {
- public:
- explicit LazyTypeCache(Zone* zone) : zone_(zone) {
- memset(cache_, 0, sizeof(cache_));
- }
-
- inline Type* Get(LazyCachedType type) {
- int index = static_cast<int>(type);
- DCHECK(index < kNumLazyCachedTypes);
- if (cache_[index] == NULL) cache_[index] = Create(type);
- return cache_[index];
- }
-
- private:
- Type* Create(LazyCachedType type) {
- switch (type) {
- case kInt8:
- return CreateNative(CreateRange<int8_t>(), Type::UntaggedSigned8());
- case kUint8:
- return CreateNative(CreateRange<uint8_t>(), Type::UntaggedUnsigned8());
- case kInt16:
- return CreateNative(CreateRange<int16_t>(), Type::UntaggedSigned16());
- case kUint16:
- return CreateNative(CreateRange<uint16_t>(),
- Type::UntaggedUnsigned16());
- case kInt32:
- return CreateNative(Type::Signed32(), Type::UntaggedSigned32());
- case kUint32:
- return CreateNative(Type::Unsigned32(), Type::UntaggedUnsigned32());
- case kFloat32:
- return CreateNative(Type::Number(), Type::UntaggedFloat32());
- case kFloat64:
- return CreateNative(Type::Number(), Type::UntaggedFloat64());
- case kUint8Clamped:
- return Get(kUint8);
- case kNumberFunc0:
- return Type::Function(Type::Number(), zone());
- case kNumberFunc1:
- return Type::Function(Type::Number(), Type::Number(), zone());
- case kNumberFunc2:
- return Type::Function(Type::Number(), Type::Number(), Type::Number(),
- zone());
- case kImulFunc:
- return Type::Function(Type::Signed32(), Type::Integral32(),
- Type::Integral32(), zone());
- case kClz32Func:
- return Type::Function(CreateRange(0, 32), Type::Number(), zone());
- case kArrayBufferFunc:
- return Type::Function(Type::Object(zone()), Type::Unsigned32(), zone());
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case k##Type##Array: \
- return CreateArray(Get(k##Type)); \
- case k##Type##ArrayFunc: \
- return CreateArrayFunction(Get(k##Type##Array));
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- case kNumLazyCachedTypes:
- break;
- }
- UNREACHABLE();
- return NULL;
- }
-
- Type* CreateArray(Type* element) const {
- return Type::Array(element, zone());
- }
-
- Type* CreateArrayFunction(Type* array) const {
- Type* arg1 = Type::Union(Type::Unsigned32(), Type::Object(), zone());
- Type* arg2 = Type::Union(Type::Unsigned32(), Type::Undefined(), zone());
- Type* arg3 = arg2;
- return Type::Function(array, arg1, arg2, arg3, zone());
- }
-
- Type* CreateNative(Type* semantic, Type* representation) const {
- return Type::Intersect(semantic, representation, zone());
- }
-
- template <typename T>
- Type* CreateRange() const {
- return CreateRange(std::numeric_limits<T>::min(),
- std::numeric_limits<T>::max());
- }
-
- Type* CreateRange(double min, double max) const {
- return Type::Range(factory()->NewNumber(min), factory()->NewNumber(max),
- zone());
- }
-
- Factory* factory() const { return isolate()->factory(); }
- Isolate* isolate() const { return zone()->isolate(); }
- Zone* zone() const { return zone_; }
-
- Type* cache_[kNumLazyCachedTypes];
- Zone* zone_;
-};
-
-
-class Typer::Decorator FINAL : public GraphDecorator {
+class Typer::Decorator final : public GraphDecorator {
public:
explicit Decorator(Typer* typer) : typer_(typer) {}
- void Decorate(Node* node) FINAL;
+ void Decorate(Node* node) final;
private:
- Typer* typer_;
+ Typer* const typer_;
};
-Typer::Typer(Graph* graph, MaybeHandle<Context> context)
- : graph_(graph),
- context_(context),
- decorator_(NULL),
- cache_(new (graph->zone()) LazyTypeCache(graph->zone())),
- weaken_min_limits_(graph->zone()),
- weaken_max_limits_(graph->zone()) {
+Typer::Typer(Isolate* isolate, Graph* graph, Flags flags,
+ CompilationDependencies* dependencies,
+ Type::FunctionType* function_type)
+ : isolate_(isolate),
+ graph_(graph),
+ flags_(flags),
+ dependencies_(dependencies),
+ function_type_(function_type),
+ decorator_(nullptr),
+ cache_(TypeCache::Get()) {
Zone* zone = this->zone();
- Factory* f = zone->isolate()->factory();
+ Factory* const factory = isolate->factory();
- Handle<Object> zero = f->NewNumber(0);
- Handle<Object> one = f->NewNumber(1);
- Handle<Object> infinity = f->NewNumber(+V8_INFINITY);
- Handle<Object> minusinfinity = f->NewNumber(-V8_INFINITY);
-
- Type* number = Type::Number();
- Type* signed32 = Type::Signed32();
- Type* unsigned32 = Type::Unsigned32();
- Type* nan_or_minuszero = Type::Union(Type::NaN(), Type::MinusZero(), zone);
+ Type* infinity = Type::Constant(factory->infinity_value(), zone);
+ Type* minus_infinity = Type::Constant(factory->minus_infinity_value(), zone);
+ // TODO(neis): Unfortunately, the infinities created in other places might
+ // be different ones (e.g. the result of NewNumber in TypeNumberConstant).
Type* truncating_to_zero =
- Type::Union(Type::Union(Type::Constant(infinity, zone),
- Type::Constant(minusinfinity, zone), zone),
- nan_or_minuszero, zone);
+ Type::Union(Type::Union(infinity, minus_infinity, zone),
+ Type::MinusZeroOrNaN(), zone);
+ DCHECK(!truncating_to_zero->Maybe(Type::Integral32()));
- boolean_or_number = Type::Union(Type::Boolean(), Type::Number(), zone);
- undefined_or_null = Type::Union(Type::Undefined(), Type::Null(), zone);
- undefined_or_number = Type::Union(Type::Undefined(), Type::Number(), zone);
- singleton_false = Type::Constant(f->false_value(), zone);
- singleton_true = Type::Constant(f->true_value(), zone);
- singleton_zero = Type::Range(zero, zero, zone);
- singleton_one = Type::Range(one, one, zone);
- zero_or_one = Type::Union(singleton_zero, singleton_one, zone);
- zeroish = Type::Union(singleton_zero, nan_or_minuszero, zone);
- signed32ish = Type::Union(signed32, truncating_to_zero, zone);
- unsigned32ish = Type::Union(unsigned32, truncating_to_zero, zone);
- falsish = Type::Union(Type::Undetectable(),
- Type::Union(Type::Union(singleton_false, zeroish, zone),
- undefined_or_null, zone),
- zone);
- truish = Type::Union(
- singleton_true,
+ singleton_false_ = Type::Constant(factory->false_value(), zone);
+ singleton_true_ = Type::Constant(factory->true_value(), zone);
+ singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
+ signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
+ unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
+ falsish_ = Type::Union(
+ Type::Undetectable(),
+ Type::Union(
+ Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
+ Type::NullOrUndefined(), zone),
+ singleton_the_hole_, zone),
+ zone);
+ truish_ = Type::Union(
+ singleton_true_,
Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone), zone);
- integer = Type::Range(minusinfinity, infinity, zone);
- weakint = Type::Union(integer, nan_or_minuszero, zone);
-
- number_fun0_ = Type::Function(number, zone);
- number_fun1_ = Type::Function(number, number, zone);
- number_fun2_ = Type::Function(number, number, number, zone);
-
- weakint_fun1_ = Type::Function(weakint, number, zone);
- random_fun_ = Type::Function(Type::OrderedNumber(), zone);
-
- const int limits_count = 20;
-
- weaken_min_limits_.reserve(limits_count + 1);
- weaken_max_limits_.reserve(limits_count + 1);
-
- double limit = 1 << 30;
- weaken_min_limits_.push_back(f->NewNumber(0));
- weaken_max_limits_.push_back(f->NewNumber(0));
- for (int i = 0; i < limits_count; i++) {
- weaken_min_limits_.push_back(f->NewNumber(-limit));
- weaken_max_limits_.push_back(f->NewNumber(limit - 1));
- limit *= 2;
- }
decorator_ = new (zone) Decorator(this);
graph_->AddDecorator(decorator_);
@@ -229,21 +80,23 @@
class Typer::Visitor : public Reducer {
public:
- explicit Visitor(Typer* typer) : typer_(typer) {}
+ explicit Visitor(Typer* typer)
+ : typer_(typer), weakened_nodes_(typer->zone()) {}
- Reduction Reduce(Node* node) OVERRIDE {
+ Reduction Reduce(Node* node) override {
if (node->op()->ValueOutputCount() == 0) return NoChange();
switch (node->opcode()) {
#define DECLARE_CASE(x) \
case IrOpcode::k##x: \
- return UpdateBounds(node, TypeBinaryOp(node, x##Typer));
+ return UpdateType(node, TypeBinaryOp(node, x##Typer));
JS_SIMPLE_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) \
case IrOpcode::k##x: \
- return UpdateBounds(node, Type##x(node));
+ return UpdateType(node, Type##x(node));
DECLARE_CASE(Start)
+ DECLARE_CASE(IfException)
// VALUE_OP_LIST without JS_SIMPLE_BINOP_LIST:
COMMON_OP_LIST(DECLARE_CASE)
SIMPLIFIED_OP_LIST(DECLARE_CASE)
@@ -255,15 +108,30 @@
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
+ DECLARE_CASE(Loop)
+ DECLARE_CASE(Branch)
+ DECLARE_CASE(IfTrue)
+ DECLARE_CASE(IfFalse)
+ DECLARE_CASE(IfSuccess)
+ DECLARE_CASE(Switch)
+ DECLARE_CASE(IfValue)
+ DECLARE_CASE(IfDefault)
+ DECLARE_CASE(Merge)
+ DECLARE_CASE(Deoptimize)
+ DECLARE_CASE(Return)
+ DECLARE_CASE(TailCall)
+ DECLARE_CASE(Terminate)
+ DECLARE_CASE(OsrNormalEntry)
+ DECLARE_CASE(OsrLoopEntry)
+ DECLARE_CASE(Throw)
DECLARE_CASE(End)
- INNER_CONTROL_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
break;
}
return NoChange();
}
- Bounds TypeNode(Node* node) {
+ Type* TypeNode(Node* node) {
switch (node->opcode()) {
#define DECLARE_CASE(x) \
case IrOpcode::k##x: return TypeBinaryOp(node, x##Typer);
@@ -272,6 +140,7 @@
#define DECLARE_CASE(x) case IrOpcode::k##x: return Type##x(node);
DECLARE_CASE(Start)
+ DECLARE_CASE(IfException)
// VALUE_OP_LIST without JS_SIMPLE_BINOP_LIST:
COMMON_OP_LIST(DECLARE_CASE)
SIMPLIFIED_OP_LIST(DECLARE_CASE)
@@ -283,64 +152,93 @@
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
+ DECLARE_CASE(Loop)
+ DECLARE_CASE(Branch)
+ DECLARE_CASE(IfTrue)
+ DECLARE_CASE(IfFalse)
+ DECLARE_CASE(IfSuccess)
+ DECLARE_CASE(Switch)
+ DECLARE_CASE(IfValue)
+ DECLARE_CASE(IfDefault)
+ DECLARE_CASE(Merge)
+ DECLARE_CASE(Deoptimize)
+ DECLARE_CASE(Return)
+ DECLARE_CASE(TailCall)
+ DECLARE_CASE(Terminate)
+ DECLARE_CASE(OsrNormalEntry)
+ DECLARE_CASE(OsrLoopEntry)
+ DECLARE_CASE(Throw)
DECLARE_CASE(End)
- INNER_CONTROL_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
break;
}
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
Type* TypeConstant(Handle<Object> value);
private:
Typer* typer_;
- MaybeHandle<Context> context_;
+ ZoneSet<NodeId> weakened_nodes_;
-#define DECLARE_METHOD(x) inline Bounds Type##x(Node* node);
+#define DECLARE_METHOD(x) inline Type* Type##x(Node* node);
DECLARE_METHOD(Start)
+ DECLARE_METHOD(IfException)
VALUE_OP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
- Bounds BoundsOrNone(Node* node) {
- return NodeProperties::IsTyped(node) ? NodeProperties::GetBounds(node)
- : Bounds(Type::None());
+ Type* TypeOrNone(Node* node) {
+ return NodeProperties::IsTyped(node) ? NodeProperties::GetType(node)
+ : Type::None();
}
- Bounds Operand(Node* node, int i) {
+ Type* Operand(Node* node, int i) {
Node* operand_node = NodeProperties::GetValueInput(node, i);
- return BoundsOrNone(operand_node);
+ return TypeOrNone(operand_node);
}
- Bounds ContextOperand(Node* node) {
- Bounds result = BoundsOrNone(NodeProperties::GetContextInput(node));
- DCHECK(result.upper->Maybe(Type::Internal()));
- // TODO(rossberg): More precisely, instead of the above assertion, we should
- // back-propagate the constraint that it has to be a subtype of Internal.
- return result;
- }
-
- Type* Weaken(Type* current_type, Type* previous_type);
+ Type* WrapContextTypeForInput(Node* node);
+ Type* Weaken(Node* node, Type* current_type, Type* previous_type);
Zone* zone() { return typer_->zone(); }
Isolate* isolate() { return typer_->isolate(); }
Graph* graph() { return typer_->graph(); }
- MaybeHandle<Context> context() { return typer_->context(); }
+ Typer::Flags flags() const { return typer_->flags(); }
+ CompilationDependencies* dependencies() const {
+ return typer_->dependencies();
+ }
+
+ void SetWeakened(NodeId node_id) { weakened_nodes_.insert(node_id); }
+ bool IsWeakened(NodeId node_id) {
+ return weakened_nodes_.find(node_id) != weakened_nodes_.end();
+ }
typedef Type* (*UnaryTyperFun)(Type*, Typer* t);
typedef Type* (*BinaryTyperFun)(Type*, Type*, Typer* t);
- Bounds TypeUnaryOp(Node* node, UnaryTyperFun);
- Bounds TypeBinaryOp(Node* node, BinaryTyperFun);
+ Type* TypeUnaryOp(Node* node, UnaryTyperFun);
+ Type* TypeBinaryOp(Node* node, BinaryTyperFun);
+ enum ComparisonOutcomeFlags {
+ kComparisonTrue = 1,
+ kComparisonFalse = 2,
+ kComparisonUndefined = 4
+ };
+ typedef base::Flags<ComparisonOutcomeFlags> ComparisonOutcome;
+
+ static ComparisonOutcome Invert(ComparisonOutcome, Typer*);
static Type* Invert(Type*, Typer*);
- static Type* FalsifyUndefined(Type*, Typer*);
+ static Type* FalsifyUndefined(ComparisonOutcome, Typer*);
static Type* Rangify(Type*, Typer*);
static Type* ToPrimitive(Type*, Typer*);
static Type* ToBoolean(Type*, Typer*);
+ static Type* ToInteger(Type*, Typer*);
+ static Type* ToLength(Type*, Typer*);
+ static Type* ToName(Type*, Typer*);
static Type* ToNumber(Type*, Typer*);
+ static Type* ToObject(Type*, Typer*);
static Type* ToString(Type*, Typer*);
static Type* NumberToInt32(Type*, Typer*);
static Type* NumberToUint32(Type*, Typer*);
@@ -351,68 +249,52 @@
static Type* JSDivideRanger(Type::RangeType*, Type::RangeType*, Typer*);
static Type* JSModulusRanger(Type::RangeType*, Type::RangeType*, Typer*);
- static Type* JSCompareTyper(Type*, Type*, Typer*);
+ static ComparisonOutcome JSCompareTyper(Type*, Type*, Typer*);
#define DECLARE_METHOD(x) static Type* x##Typer(Type*, Type*, Typer*);
JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
- static Type* JSUnaryNotTyper(Type*, Typer*);
+ static Type* JSTypeOfTyper(Type*, Typer*);
static Type* JSLoadPropertyTyper(Type*, Type*, Typer*);
static Type* JSCallFunctionTyper(Type*, Typer*);
- Reduction UpdateBounds(Node* node, Bounds current) {
+ static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
+
+ Reduction UpdateType(Node* node, Type* current) {
if (NodeProperties::IsTyped(node)) {
- // Widen the bounds of a previously typed node.
- Bounds previous = NodeProperties::GetBounds(node);
- // Speed up termination in the presence of range types:
- current.upper = Weaken(current.upper, previous.upper);
- current.lower = Weaken(current.lower, previous.lower);
+ // Widen the type of a previously typed node.
+ Type* previous = NodeProperties::GetType(node);
+ if (node->opcode() == IrOpcode::kPhi) {
+ // Speed up termination in the presence of range types:
+ current = Weaken(node, current, previous);
+ }
- // Types should not get less precise.
- DCHECK(previous.lower->Is(current.lower));
- DCHECK(previous.upper->Is(current.upper));
+ CHECK(previous->Is(current));
- NodeProperties::SetBounds(node, current);
- if (!(previous.Narrows(current) && current.Narrows(previous))) {
+ NodeProperties::SetType(node, current);
+ if (!current->Is(previous)) {
// If something changed, revisit all uses.
return Changed(node);
}
return NoChange();
} else {
- // No previous type, simply update the bounds.
- NodeProperties::SetBounds(node, current);
+ // No previous type, simply update the type.
+ NodeProperties::SetType(node, current);
return Changed(node);
}
}
};
-void Typer::Run() {
- {
- // TODO(titzer): this is a hack. Reset types for interior nodes first.
- NodeDeque deque(zone());
- NodeMarker<bool> marked(graph(), 2);
- deque.push_front(graph()->end());
- marked.Set(graph()->end(), true);
- while (!deque.empty()) {
- Node* node = deque.front();
- deque.pop_front();
- // TODO(titzer): there shouldn't be a need to retype constants.
- if (node->op()->ValueOutputCount() > 0)
- NodeProperties::RemoveBounds(node);
- for (Node* input : node->inputs()) {
- if (!marked.Get(input)) {
- marked.Set(input, true);
- deque.push_back(input);
- }
- }
- }
- }
+void Typer::Run() { Run(NodeVector(zone())); }
+
+void Typer::Run(const NodeVector& roots) {
Visitor visitor(this);
- GraphReducer graph_reducer(graph(), zone());
+ GraphReducer graph_reducer(zone(), graph());
graph_reducer.AddReducer(&visitor);
+ for (Node* const root : roots) graph_reducer.ReduceNode(root);
graph_reducer.ReduceGraph();
}
@@ -424,12 +306,12 @@
bool is_typed = NodeProperties::IsTyped(node);
if (is_typed || NodeProperties::AllValueInputsAreTyped(node)) {
Visitor typing(typer_);
- Bounds bounds = typing.TypeNode(node);
+ Type* type = typing.TypeNode(node);
if (is_typed) {
- bounds =
- Bounds::Both(bounds, NodeProperties::GetBounds(node), typer_->zone());
+ type = Type::Intersect(type, NodeProperties::GetType(node),
+ typer_->zone());
}
- NodeProperties::SetBounds(node, bounds);
+ NodeProperties::SetType(node, type);
}
}
}
@@ -442,54 +324,54 @@
// as an argument.
-Bounds Typer::Visitor::TypeUnaryOp(Node* node, UnaryTyperFun f) {
- Bounds input = Operand(node, 0);
- Type* upper = input.upper->Is(Type::None())
- ? Type::None()
- : f(input.upper, typer_);
- Type* lower = input.lower->Is(Type::None())
- ? Type::None()
- : (input.lower == input.upper || upper->IsConstant())
- ? upper // TODO(neis): Extend this to Range(x,x), NaN, MinusZero, ...?
- : f(input.lower, typer_);
- // TODO(neis): Figure out what to do with lower bound.
- return Bounds(lower, upper);
+Type* Typer::Visitor::TypeUnaryOp(Node* node, UnaryTyperFun f) {
+ Type* input = Operand(node, 0);
+ return input->IsInhabited() ? f(input, typer_) : Type::None();
}
-Bounds Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
- Bounds left = Operand(node, 0);
- Bounds right = Operand(node, 1);
- Type* upper = left.upper->Is(Type::None()) || right.upper->Is(Type::None())
- ? Type::None()
- : f(left.upper, right.upper, typer_);
- Type* lower = left.lower->Is(Type::None()) || right.lower->Is(Type::None())
- ? Type::None()
- : ((left.lower == left.upper && right.lower == right.upper) ||
- upper->IsConstant())
- ? upper
- : f(left.lower, right.lower, typer_);
- // TODO(neis): Figure out what to do with lower bound.
- return Bounds(lower, upper);
+Type* Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
+ Type* left = Operand(node, 0);
+ Type* right = Operand(node, 1);
+ return left->IsInhabited() && right->IsInhabited() ? f(left, right, typer_)
+ : Type::None();
}
Type* Typer::Visitor::Invert(Type* type, Typer* t) {
- if (type->Is(t->singleton_false)) return t->singleton_true;
- if (type->Is(t->singleton_true)) return t->singleton_false;
+ DCHECK(type->Is(Type::Boolean()));
+ DCHECK(type->IsInhabited());
+ if (type->Is(t->singleton_false_)) return t->singleton_true_;
+ if (type->Is(t->singleton_true_)) return t->singleton_false_;
return type;
}
-Type* Typer::Visitor::FalsifyUndefined(Type* type, Typer* t) {
- if (type->Is(Type::Undefined())) return t->singleton_false;
- return type;
+Typer::Visitor::ComparisonOutcome Typer::Visitor::Invert(
+ ComparisonOutcome outcome, Typer* t) {
+ ComparisonOutcome result(0);
+ if ((outcome & kComparisonUndefined) != 0) result |= kComparisonUndefined;
+ if ((outcome & kComparisonTrue) != 0) result |= kComparisonFalse;
+ if ((outcome & kComparisonFalse) != 0) result |= kComparisonTrue;
+ return result;
+}
+
+
+Type* Typer::Visitor::FalsifyUndefined(ComparisonOutcome outcome, Typer* t) {
+ if ((outcome & kComparisonFalse) != 0 ||
+ (outcome & kComparisonUndefined) != 0) {
+ return (outcome & kComparisonTrue) != 0 ? Type::Boolean()
+ : t->singleton_false_;
+ }
+ // The outcome is non-empty, so we know it must include true.
+ DCHECK((outcome & kComparisonTrue) != 0);
+ return t->singleton_true_;
}
Type* Typer::Visitor::Rangify(Type* type, Typer* t) {
if (type->IsRange()) return type; // Shortcut.
- if (!type->Is(t->integer) && !type->Is(Type::Integral32())) {
+ if (!type->Is(t->cache_.kInteger)) {
return type; // Give up on non-integer types.
}
double min = type->Min();
@@ -500,8 +382,7 @@
DCHECK(std::isnan(max));
return type;
}
- Factory* f = t->isolate()->factory();
- return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
+ return Type::Range(min, max, t->zone());
}
@@ -518,38 +399,84 @@
Type* Typer::Visitor::ToBoolean(Type* type, Typer* t) {
if (type->Is(Type::Boolean())) return type;
- if (type->Is(t->falsish)) return t->singleton_false;
- if (type->Is(t->truish)) return t->singleton_true;
+ if (type->Is(t->falsish_)) return t->singleton_false_;
+ if (type->Is(t->truish_)) return t->singleton_true_;
if (type->Is(Type::PlainNumber()) && (type->Max() < 0 || 0 < type->Min())) {
- return t->singleton_true; // Ruled out nan, -0 and +0.
+ return t->singleton_true_; // Ruled out NaN, -0 and +0.
}
return Type::Boolean();
}
+// static
+Type* Typer::Visitor::ToInteger(Type* type, Typer* t) {
+ // ES6 section 7.1.4 ToInteger ( argument )
+ type = ToNumber(type, t);
+ if (type->Is(t->cache_.kIntegerOrMinusZero)) return type;
+ return t->cache_.kIntegerOrMinusZero;
+}
+
+
+// static
+Type* Typer::Visitor::ToLength(Type* type, Typer* t) {
+ // ES6 section 7.1.15 ToLength ( argument )
+ type = ToInteger(type, t);
+ double min = type->Min();
+ double max = type->Max();
+ if (min <= 0.0) min = 0.0;
+ if (max > kMaxSafeInteger) max = kMaxSafeInteger;
+ if (max <= min) max = min;
+ return Type::Range(min, max, t->zone());
+}
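+// (Worked example: an integral input typed Range(-5, 1e300) clamps to
+// Range(0, kMaxSafeInteger), and the wholly negative Range(-3, -1)
+// collapses to Range(0, 0).)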
+
+
+// static
+Type* Typer::Visitor::ToName(Type* type, Typer* t) {
+ // ES6 section 7.1.14 ToPropertyKey ( argument )
+ type = ToPrimitive(type, t);
+ if (type->Is(Type::Name())) return type;
+ if (type->Maybe(Type::Symbol())) return Type::Name();
+ return ToString(type, t);
+}
+
+
+// static
Type* Typer::Visitor::ToNumber(Type* type, Typer* t) {
if (type->Is(Type::Number())) return type;
- if (type->Is(Type::Null())) return t->singleton_zero;
- if (type->Is(Type::Undefined())) return Type::NaN();
- if (type->Is(t->undefined_or_null)) {
- return Type::Union(Type::NaN(), t->singleton_zero, t->zone());
+ if (type->Is(Type::NullOrUndefined())) {
+ if (type->Is(Type::Null())) return t->cache_.kSingletonZero;
+ if (type->Is(Type::Undefined())) return Type::NaN();
+ return Type::Union(Type::NaN(), t->cache_.kSingletonZero, t->zone());
}
- if (type->Is(t->undefined_or_number)) {
+ if (type->Is(Type::NumberOrUndefined())) {
return Type::Union(Type::Intersect(type, Type::Number(), t->zone()),
Type::NaN(), t->zone());
}
- if (type->Is(t->singleton_false)) return t->singleton_zero;
- if (type->Is(t->singleton_true)) return t->singleton_one;
- if (type->Is(Type::Boolean())) return t->zero_or_one;
- if (type->Is(t->boolean_or_number)) {
+ if (type->Is(t->singleton_false_)) return t->cache_.kSingletonZero;
+ if (type->Is(t->singleton_true_)) return t->cache_.kSingletonOne;
+ if (type->Is(Type::Boolean())) return t->cache_.kZeroOrOne;
+ if (type->Is(Type::BooleanOrNumber())) {
return Type::Union(Type::Intersect(type, Type::Number(), t->zone()),
- t->zero_or_one, t->zone());
+ t->cache_.kZeroOrOne, t->zone());
}
return Type::Number();
}
+// static
+Type* Typer::Visitor::ToObject(Type* type, Typer* t) {
+ // ES6 section 7.1.13 ToObject ( argument )
+ if (type->Is(Type::Receiver())) return type;
+ if (type->Is(Type::Primitive())) return Type::OtherObject();
+ if (!type->Maybe(Type::Undetectable())) return Type::DetectableReceiver();
+ return Type::Receiver();
+}
+
+
+// static
Type* Typer::Visitor::ToString(Type* type, Typer* t) {
+ // ES6 section 7.1.12 ToString ( argument )
+ type = ToPrimitive(type, t);
if (type->Is(Type::String())) return type;
return Type::String();
}
@@ -558,10 +485,11 @@
Type* Typer::Visitor::NumberToInt32(Type* type, Typer* t) {
// TODO(neis): DCHECK(type->Is(Type::Number()));
if (type->Is(Type::Signed32())) return type;
- if (type->Is(t->zeroish)) return t->singleton_zero;
- if (type->Is(t->signed32ish)) {
- return Type::Intersect(Type::Union(type, t->singleton_zero, t->zone()),
- Type::Signed32(), t->zone());
+ if (type->Is(t->cache_.kZeroish)) return t->cache_.kSingletonZero;
+ if (type->Is(t->signed32ish_)) {
+ return Type::Intersect(
+ Type::Union(type, t->cache_.kSingletonZero, t->zone()),
+ Type::Signed32(), t->zone());
}
return Type::Signed32();
}
@@ -570,10 +498,11 @@
Type* Typer::Visitor::NumberToUint32(Type* type, Typer* t) {
// TODO(neis): DCHECK(type->Is(Type::Number()));
if (type->Is(Type::Unsigned32())) return type;
- if (type->Is(t->zeroish)) return t->singleton_zero;
- if (type->Is(t->unsigned32ish)) {
- return Type::Intersect(Type::Union(type, t->singleton_zero, t->zone()),
- Type::Unsigned32(), t->zone());
+ if (type->Is(t->cache_.kZeroish)) return t->cache_.kSingletonZero;
+ if (type->Is(t->unsigned32ish_)) {
+ return Type::Intersect(
+ Type::Union(type, t->cache_.kSingletonZero, t->zone()),
+ Type::Unsigned32(), t->zone());
}
return Type::Unsigned32();
}
@@ -585,135 +514,167 @@
// Control operators.
-Bounds Typer::Visitor::TypeStart(Node* node) {
- return Bounds(Type::None(zone()), Type::Internal(zone()));
-}
+Type* Typer::Visitor::TypeStart(Node* node) { return Type::Internal(zone()); }
+
+
+Type* Typer::Visitor::TypeIfException(Node* node) { return Type::Any(); }
// Common operators.
-Bounds Typer::Visitor::TypeParameter(Node* node) {
- return Bounds::Unbounded(zone());
-}
-
-
-Bounds Typer::Visitor::TypeInt32Constant(Node* node) {
- Factory* f = isolate()->factory();
- Handle<Object> number = f->NewNumber(OpParameter<int32_t>(node));
- return Bounds(Type::Intersect(
- Type::Range(number, number, zone()), Type::UntaggedSigned32(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeInt64Constant(Node* node) {
- // TODO(rossberg): This actually seems to be a PointerConstant so far...
- return Bounds(Type::Internal()); // TODO(rossberg): Add int64 bitset type?
-}
-
-
-Bounds Typer::Visitor::TypeFloat32Constant(Node* node) {
- return Bounds(Type::Intersect(
- Type::Of(OpParameter<float>(node), zone()),
- Type::UntaggedFloat32(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeFloat64Constant(Node* node) {
- return Bounds(Type::Intersect(
- Type::Of(OpParameter<double>(node), zone()),
- Type::UntaggedFloat64(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeNumberConstant(Node* node) {
- Factory* f = isolate()->factory();
- return Bounds(Type::Constant(
- f->NewNumber(OpParameter<double>(node)), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeHeapConstant(Node* node) {
- return Bounds(TypeConstant(OpParameter<Unique<HeapObject> >(node).handle()));
-}
-
-
-Bounds Typer::Visitor::TypeExternalConstant(Node* node) {
- return Bounds(Type::None(zone()), Type::Internal(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeSelect(Node* node) {
- return Bounds::Either(Operand(node, 1), Operand(node, 2), zone());
-}
-
-
-Bounds Typer::Visitor::TypePhi(Node* node) {
- int arity = node->op()->ValueInputCount();
- Bounds bounds = Operand(node, 0);
- for (int i = 1; i < arity; ++i) {
- bounds = Bounds::Either(bounds, Operand(node, i), zone());
+Type* Typer::Visitor::TypeParameter(Node* node) {
+ if (Type::FunctionType* function_type = typer_->function_type()) {
+ int const index = ParameterIndexOf(node->op());
+ if (index >= 0 && index < function_type->Arity()) {
+ return function_type->Parameter(index);
+ }
}
- return bounds;
+ return Type::Any();
}
-Bounds Typer::Visitor::TypeEffectPhi(Node* node) {
+Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
+
+
+Type* Typer::Visitor::TypeInt32Constant(Node* node) {
+ double number = OpParameter<int32_t>(node);
+ return Type::Intersect(Type::Range(number, number, zone()),
+ Type::UntaggedIntegral32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeInt64Constant(Node* node) {
+ // TODO(rossberg): This actually seems to be a PointerConstant so far...
+ return Type::Internal(); // TODO(rossberg): Add int64 bitset type?
+}
+
+
+Type* Typer::Visitor::TypeFloat32Constant(Node* node) {
+ return Type::Intersect(Type::Of(OpParameter<float>(node), zone()),
+ Type::UntaggedFloat32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeFloat64Constant(Node* node) {
+ return Type::Intersect(Type::Of(OpParameter<double>(node), zone()),
+ Type::UntaggedFloat64(), zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberConstant(Node* node) {
+ Factory* f = isolate()->factory();
+ double number = OpParameter<double>(node);
+ if (Type::IsInteger(number)) {
+ return Type::Range(number, number, zone());
+ }
+ return Type::Constant(f->NewNumber(number), zone());
+}
+
+
+Type* Typer::Visitor::TypeHeapConstant(Node* node) {
+ return TypeConstant(OpParameter<Handle<HeapObject>>(node));
+}
+
+
+Type* Typer::Visitor::TypeExternalConstant(Node* node) {
+ return Type::Internal(zone());
+}
+
+
+Type* Typer::Visitor::TypeSelect(Node* node) {
+ return Type::Union(Operand(node, 1), Operand(node, 2), zone());
+}
+
+
+Type* Typer::Visitor::TypePhi(Node* node) {
+ int arity = node->op()->ValueInputCount();
+ Type* type = Operand(node, 0);
+ for (int i = 1; i < arity; ++i) {
+ type = Type::Union(type, Operand(node, i), zone());
+ }
+ return type;
+}
+
+
+Type* Typer::Visitor::TypeEffectPhi(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeValueEffect(Node* node) {
+Type* Typer::Visitor::TypeEffectSet(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeFinish(Node* node) {
- return Operand(node, 0);
+Type* Typer::Visitor::TypeGuard(Node* node) {
+ Type* input_type = Operand(node, 0);
+ Type* guard_type = OpParameter<Type*>(node);
+ return Type::Intersect(input_type, guard_type, zone());
}
-Bounds Typer::Visitor::TypeFrameState(Node* node) {
+Type* Typer::Visitor::TypeBeginRegion(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+Type* Typer::Visitor::TypeFinishRegion(Node* node) { return Operand(node, 0); }
+
+
+Type* Typer::Visitor::TypeFrameState(Node* node) {
// TODO(rossberg): Ideally FrameState wouldn't have a value output.
- return Bounds(Type::None(zone()), Type::Internal(zone()));
+ return Type::Internal(zone());
}
-Bounds Typer::Visitor::TypeStateValues(Node* node) {
- return Bounds(Type::None(zone()), Type::Internal(zone()));
+Type* Typer::Visitor::TypeStateValues(Node* node) {
+ return Type::Internal(zone());
}
-Bounds Typer::Visitor::TypeCall(Node* node) {
- return Bounds::Unbounded(zone());
+Type* Typer::Visitor::TypeObjectState(Node* node) {
+ return Type::Internal(zone());
}
-Bounds Typer::Visitor::TypeProjection(Node* node) {
+Type* Typer::Visitor::TypeTypedStateValues(Node* node) {
+ return Type::Internal(zone());
+}
+
+
+Type* Typer::Visitor::TypeCall(Node* node) { return Type::Any(); }
+
+
+Type* Typer::Visitor::TypeProjection(Node* node) {
   // TODO(titzer): use the output type of the input to determine the type.
- return Bounds::Unbounded(zone());
+ return Type::Any();
}
+Type* Typer::Visitor::TypeDead(Node* node) { return Type::Any(); }
+
+
// JS comparison operators.
Type* Typer::Visitor::JSEqualTyper(Type* lhs, Type* rhs, Typer* t) {
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return t->singleton_false;
- if (lhs->Is(t->undefined_or_null) && rhs->Is(t->undefined_or_null)) {
- return t->singleton_true;
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return t->singleton_false_;
+ if (lhs->Is(Type::NullOrUndefined()) && rhs->Is(Type::NullOrUndefined())) {
+ return t->singleton_true_;
}
if (lhs->Is(Type::Number()) && rhs->Is(Type::Number()) &&
(lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
- return t->singleton_false;
+ return t->singleton_false_;
}
if (lhs->IsConstant() && rhs->Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value,
// which is not nan due to the earlier check.
// TODO(neis): Extend this to Range(x,x), MinusZero, ...?
- return t->singleton_true;
+ return t->singleton_true_;
}
return Type::Boolean();
}
@@ -737,16 +698,20 @@
Type* Typer::Visitor::JSStrictEqualTyper(Type* lhs, Type* rhs, Typer* t) {
- if (!JSType(lhs)->Maybe(JSType(rhs))) return t->singleton_false;
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return t->singleton_false;
+ if (!JSType(lhs)->Maybe(JSType(rhs))) return t->singleton_false_;
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return t->singleton_false_;
if (lhs->Is(Type::Number()) && rhs->Is(Type::Number()) &&
(lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
- return t->singleton_false;
+ return t->singleton_false_;
+ }
+ if ((lhs->Is(t->singleton_the_hole_) || rhs->Is(t->singleton_the_hole_)) &&
+ !lhs->Maybe(rhs)) {
+ return t->singleton_false_;
}
if (lhs->IsConstant() && rhs->Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value,
// which is not nan due to the earlier check.
- return t->singleton_true;
+ return t->singleton_true_;
}
return Type::Boolean();
}
@@ -761,26 +726,41 @@
// (<, <=, >=, >) with the help of a single abstract one. It behaves like <
// but returns undefined when the inputs cannot be compared.
// We implement the typing analogously.
-Type* Typer::Visitor::JSCompareTyper(Type* lhs, Type* rhs, Typer* t) {
+Typer::Visitor::ComparisonOutcome Typer::Visitor::JSCompareTyper(Type* lhs,
+ Type* rhs,
+ Typer* t) {
lhs = ToPrimitive(lhs, t);
rhs = ToPrimitive(rhs, t);
if (lhs->Maybe(Type::String()) && rhs->Maybe(Type::String())) {
- return Type::Boolean();
+ return ComparisonOutcome(kComparisonTrue) |
+ ComparisonOutcome(kComparisonFalse);
}
lhs = ToNumber(lhs, t);
rhs = ToNumber(rhs, t);
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::Undefined();
+
+ // Shortcut for NaNs.
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return kComparisonUndefined;
+
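+  // ComparisonOutcome is a bitset over {true, false, undefined}; callers
+  // fold it back into a Boolean-ish type via FalsifyUndefined.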
+ ComparisonOutcome result;
if (lhs->IsConstant() && rhs->Is(lhs)) {
- // Types are equal and are inhabited only by a single semantic value,
- // which is not NaN due to the previous check.
- return t->singleton_false;
+ // Types are equal and are inhabited only by a single semantic value.
+ result = kComparisonFalse;
+ } else if (lhs->Min() >= rhs->Max()) {
+ result = kComparisonFalse;
+ } else if (lhs->Max() < rhs->Min()) {
+ result = kComparisonTrue;
+ } else {
+ // We cannot figure out the result, return both true and false. (We do not
+ // have to return undefined because that cannot affect the result of
+ // FalsifyUndefined.)
+ return ComparisonOutcome(kComparisonTrue) |
+ ComparisonOutcome(kComparisonFalse);
}
- if (lhs->Min() >= rhs->Max()) return t->singleton_false;
- if (lhs->Max() < rhs->Min() &&
- !lhs->Maybe(Type::NaN()) && !rhs->Maybe(Type::NaN())) {
- return t->singleton_true;
+  // Add undefined if either input may be NaN.
+ if (lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN())) {
+ result |= kComparisonUndefined;
}
- return Type::Boolean();
+ return result;
}
@@ -809,7 +789,6 @@
Type* Typer::Visitor::JSBitwiseOrTyper(Type* lhs, Type* rhs, Typer* t) {
- Factory* f = t->isolate()->factory();
lhs = NumberToInt32(ToNumber(lhs, t), t);
rhs = NumberToInt32(ToNumber(rhs, t), t);
double lmin = lhs->Min();
@@ -837,13 +816,12 @@
// value.
max = std::min(max, -1.0);
}
- return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
+ return Type::Range(min, max, t->zone());
// TODO(neis): Be precise for singleton inputs, here and elsewhere.
}
Type* Typer::Visitor::JSBitwiseAndTyper(Type* lhs, Type* rhs, Typer* t) {
- Factory* f = t->isolate()->factory();
lhs = NumberToInt32(ToNumber(lhs, t), t);
rhs = NumberToInt32(ToNumber(rhs, t), t);
double lmin = lhs->Min();
@@ -865,7 +843,7 @@
min = 0;
max = std::min(max, rmax);
}
- return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
+ return Type::Range(min, max, t->zone());
}
@@ -878,12 +856,12 @@
double rmax = rhs->Max();
if ((lmin >= 0 && rmin >= 0) || (lmax < 0 && rmax < 0)) {
// Xor-ing negative or non-negative values results in a non-negative value.
- return Type::NonNegativeSigned32();
+ return Type::Unsigned31();
}
if ((lmax < 0 && rmin >= 0) || (lmin >= 0 && rmax < 0)) {
// Xor-ing a negative and a non-negative value results in a negative value.
// TODO(jarin) Use a range here.
- return Type::NegativeSigned32();
+ return Type::Negative32();
}
return Type::Signed32();
}
@@ -903,11 +881,17 @@
// Right-shifting a non-negative value cannot make it negative, nor larger.
min = std::max(min, 0.0);
max = std::min(max, lhs->Max());
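+    // Shifting a non-negative value right by at least rhs->Min() bits can
+    // only shrink it, so the shifted upper bound is still an upper bound.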
+ if (rhs->Min() > 0 && rhs->Max() <= 31) {
+ max = static_cast<int>(max) >> static_cast<int>(rhs->Min());
+ }
}
if (lhs->Max() < 0) {
// Right-shifting a negative value cannot make it non-negative, nor smaller.
min = std::max(min, lhs->Min());
max = std::min(max, -1.0);
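+    // Arithmetic shifts preserve the sign, so the most negative result is
+    // the lower bound shifted by the smallest shift amount.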
+ if (rhs->Min() > 0 && rhs->Max() <= 31) {
+ min = static_cast<int>(min) >> static_cast<int>(rhs->Min());
+ }
}
if (rhs->Min() > 0 && rhs->Max() <= 31) {
// Right-shifting by a positive value yields a small integer value.
@@ -919,8 +903,7 @@
// TODO(jarin) Ideally, the following micro-optimization should be performed
// by the type constructor.
if (max != Type::Signed32()->Max() || min != Type::Signed32()->Min()) {
- Factory* f = t->isolate()->factory();
- return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
+ return Type::Range(min, max, t->zone());
}
return Type::Signed32();
}
@@ -928,11 +911,8 @@
Type* Typer::Visitor::JSShiftRightLogicalTyper(Type* lhs, Type* rhs, Typer* t) {
lhs = NumberToUint32(ToNumber(lhs, t), t);
- Factory* f = t->isolate()->factory();
// Logical right-shifting any value cannot make it larger.
- Handle<Object> min = f->NewNumber(0);
- Handle<Object> max = f->NewNumber(lhs->Max());
- return Type::Range(min, max, t->zone());
+ return Type::Range(0.0, lhs->Max(), t->zone());
}
@@ -974,10 +954,10 @@
Type* Typer::Visitor::JSAddRanger(Type::RangeType* lhs, Type::RangeType* rhs,
Typer* t) {
double results[4];
- results[0] = lhs->Min()->Number() + rhs->Min()->Number();
- results[1] = lhs->Min()->Number() + rhs->Max()->Number();
- results[2] = lhs->Max()->Number() + rhs->Min()->Number();
- results[3] = lhs->Max()->Number() + rhs->Max()->Number();
+ results[0] = lhs->Min() + rhs->Min();
+ results[1] = lhs->Min() + rhs->Max();
+ results[2] = lhs->Max() + rhs->Min();
+ results[3] = lhs->Max() + rhs->Max();
// Since none of the inputs can be -0, the result cannot be -0 either.
// However, it can be nan (the sum of two infinities of opposite sign).
// On the other hand, if none of the "results" above is nan, then the actual
@@ -987,9 +967,8 @@
if (std::isnan(results[i])) ++nans;
}
if (nans == 4) return Type::NaN(); // [-inf..-inf] + [inf..inf] or vice versa
- Factory* f = t->isolate()->factory();
- Type* range = Type::Range(f->NewNumber(array_min(results, 4)),
- f->NewNumber(array_max(results, 4)), t->zone());
+ Type* range =
+ Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
return nans == 0 ? range : Type::Union(range, Type::NaN(), t->zone());
// Examples:
// [-inf, -inf] + [+inf, +inf] = NaN
@@ -1023,10 +1002,10 @@
Type* Typer::Visitor::JSSubtractRanger(Type::RangeType* lhs,
Type::RangeType* rhs, Typer* t) {
double results[4];
- results[0] = lhs->Min()->Number() - rhs->Min()->Number();
- results[1] = lhs->Min()->Number() - rhs->Max()->Number();
- results[2] = lhs->Max()->Number() - rhs->Min()->Number();
- results[3] = lhs->Max()->Number() - rhs->Max()->Number();
+ results[0] = lhs->Min() - rhs->Min();
+ results[1] = lhs->Min() - rhs->Max();
+ results[2] = lhs->Max() - rhs->Min();
+ results[3] = lhs->Max() - rhs->Max();
// Since none of the inputs can be -0, the result cannot be -0.
// However, it can be nan (the subtraction of two infinities of same sign).
// On the other hand, if none of the "results" above is nan, then the actual
@@ -1036,9 +1015,8 @@
if (std::isnan(results[i])) ++nans;
}
if (nans == 4) return Type::NaN(); // [inf..inf] - [inf..inf] (all same sign)
- Factory* f = t->isolate()->factory();
- Type* range = Type::Range(f->NewNumber(array_min(results, 4)),
- f->NewNumber(array_max(results, 4)), t->zone());
+ Type* range =
+ Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
return nans == 0 ? range : Type::Union(range, Type::NaN(), t->zone());
// Examples:
// [-inf, +inf] - [-inf, +inf] = [-inf, +inf] \/ NaN
@@ -1062,10 +1040,10 @@
Type* Typer::Visitor::JSMultiplyRanger(Type::RangeType* lhs,
Type::RangeType* rhs, Typer* t) {
double results[4];
- double lmin = lhs->Min()->Number();
- double lmax = lhs->Max()->Number();
- double rmin = rhs->Min()->Number();
- double rmax = rhs->Max()->Number();
+ double lmin = lhs->Min();
+ double lmax = lhs->Max();
+ double rmin = rhs->Min();
+ double rmax = rhs->Max();
results[0] = lmin * rmin;
results[1] = lmin * rmax;
results[2] = lmax * rmin;
@@ -1074,16 +1052,15 @@
// the discontinuity makes it too complicated. Note that even if none of the
// "results" above is nan, the actual result may still be, so we have to do a
// different check:
- bool maybe_nan = (lhs->Maybe(t->singleton_zero) &&
+ bool maybe_nan = (lhs->Maybe(t->cache_.kSingletonZero) &&
(rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
- (rhs->Maybe(t->singleton_zero) &&
+ (rhs->Maybe(t->cache_.kSingletonZero) &&
(lmin == -V8_INFINITY || lmax == +V8_INFINITY));
- if (maybe_nan) return t->weakint; // Giving up.
- bool maybe_minuszero = (lhs->Maybe(t->singleton_zero) && rmin < 0) ||
- (rhs->Maybe(t->singleton_zero) && lmin < 0);
- Factory* f = t->isolate()->factory();
- Type* range = Type::Range(f->NewNumber(array_min(results, 4)),
- f->NewNumber(array_max(results, 4)), t->zone());
+ if (maybe_nan) return t->cache_.kIntegerOrMinusZeroOrNaN; // Giving up.
+ bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
+ (rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
+ Type* range =
+ Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
: range;
}
@@ -1107,7 +1084,7 @@
// Division is tricky, so all we do is try ruling out nan.
// TODO(neis): try ruling out -0 as well?
bool maybe_nan =
- lhs->Maybe(Type::NaN()) || rhs->Maybe(t->zeroish) ||
+ lhs->Maybe(Type::NaN()) || rhs->Maybe(t->cache_.kZeroish) ||
((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
(rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
return maybe_nan ? Type::Number() : Type::OrderedNumber();
@@ -1116,10 +1093,10 @@
Type* Typer::Visitor::JSModulusRanger(Type::RangeType* lhs,
Type::RangeType* rhs, Typer* t) {
- double lmin = lhs->Min()->Number();
- double lmax = lhs->Max()->Number();
- double rmin = rhs->Min()->Number();
- double rmax = rhs->Max()->Number();
+ double lmin = lhs->Min();
+ double lmax = lhs->Max();
+ double rmin = rhs->Min();
+ double rmax = rhs->Max();
double labs = std::max(std::abs(lmin), std::abs(lmax));
double rabs = std::max(std::abs(rmin), std::abs(rmax)) - 1;
@@ -1140,8 +1117,7 @@
maybe_minus_zero = true;
}
- Factory* f = t->isolate()->factory();
- Type* result = Type::Range(f->NewNumber(omin), f->NewNumber(omax), t->zone());
+ Type* result = Type::Range(omin, omax, t->zone());
if (maybe_minus_zero)
result = Type::Union(result, Type::MinusZero(), t->zone());
return result;
@@ -1153,7 +1129,7 @@
rhs = ToNumber(rhs, t);
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
- if (lhs->Maybe(Type::NaN()) || rhs->Maybe(t->zeroish) ||
+ if (lhs->Maybe(Type::NaN()) || rhs->Maybe(t->cache_.kZeroish) ||
lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) {
     // Result may be NaN.
return Type::Number();
@@ -1171,54 +1147,102 @@
// JS unary operators.
-Type* Typer::Visitor::JSUnaryNotTyper(Type* type, Typer* t) {
- return Invert(ToBoolean(type, t), t);
+Type* Typer::Visitor::JSTypeOfTyper(Type* type, Typer* t) {
+ Factory* const f = t->isolate()->factory();
+ if (type->Is(Type::Boolean())) {
+ return Type::Constant(f->boolean_string(), t->zone());
+ } else if (type->Is(Type::Number())) {
+ return Type::Constant(f->number_string(), t->zone());
+ } else if (type->Is(Type::String())) {
+ return Type::Constant(f->string_string(), t->zone());
+ } else if (type->Is(Type::Symbol())) {
+ return Type::Constant(f->symbol_string(), t->zone());
+ } else if (type->Is(Type::Union(Type::Undefined(), Type::Undetectable(),
+ t->zone()))) {
+ return Type::Constant(f->undefined_string(), t->zone());
+ } else if (type->Is(Type::Null())) {
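+    // Note: typeof null evaluates to "object" per the ES spec.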
+ return Type::Constant(f->object_string(), t->zone());
+ } else if (type->Is(Type::Function())) {
+ return Type::Constant(f->function_string(), t->zone());
+ } else if (type->IsConstant()) {
+ return Type::Constant(
+ Object::TypeOf(t->isolate(), type->AsConstant()->Value()), t->zone());
+ }
+ return Type::InternalizedString();
}
-Bounds Typer::Visitor::TypeJSUnaryNot(Node* node) {
- return TypeUnaryOp(node, JSUnaryNotTyper);
-}
-
-
-Bounds Typer::Visitor::TypeJSTypeOf(Node* node) {
- return Bounds(Type::None(zone()), Type::InternalizedString(zone()));
+Type* Typer::Visitor::TypeJSTypeOf(Node* node) {
+ return TypeUnaryOp(node, JSTypeOfTyper);
}
// JS conversion operators.
-Bounds Typer::Visitor::TypeJSToBoolean(Node* node) {
+Type* Typer::Visitor::TypeJSToBoolean(Node* node) {
return TypeUnaryOp(node, ToBoolean);
}
-Bounds Typer::Visitor::TypeJSToNumber(Node* node) {
+Type* Typer::Visitor::TypeJSToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
-Bounds Typer::Visitor::TypeJSToString(Node* node) {
+Type* Typer::Visitor::TypeJSToString(Node* node) {
return TypeUnaryOp(node, ToString);
}
-Bounds Typer::Visitor::TypeJSToName(Node* node) {
- return Bounds(Type::None(), Type::Name());
+Type* Typer::Visitor::TypeJSToName(Node* node) {
+ return TypeUnaryOp(node, ToName);
}
-Bounds Typer::Visitor::TypeJSToObject(Node* node) {
- return Bounds(Type::None(), Type::Receiver());
+Type* Typer::Visitor::TypeJSToObject(Node* node) {
+ return TypeUnaryOp(node, ToObject);
}
// JS object operators.
-Bounds Typer::Visitor::TypeJSCreate(Node* node) {
- return Bounds(Type::None(), Type::Object());
+Type* Typer::Visitor::TypeJSCreate(Node* node) { return Type::Object(); }
+
+
+Type* Typer::Visitor::TypeJSCreateArguments(Node* node) {
+ return Type::OtherObject();
+}
+
+
+Type* Typer::Visitor::TypeJSCreateArray(Node* node) {
+ return Type::OtherObject();
+}
+
+
+Type* Typer::Visitor::TypeJSCreateClosure(Node* node) {
+ return Type::Function();
+}
+
+
+Type* Typer::Visitor::TypeJSCreateIterResultObject(Node* node) {
+ return Type::OtherObject();
+}
+
+
+Type* Typer::Visitor::TypeJSCreateLiteralArray(Node* node) {
+ return Type::OtherObject();
+}
+
+
+Type* Typer::Visitor::TypeJSCreateLiteralObject(Node* node) {
+ return Type::OtherObject();
+}
+
+
+Type* Typer::Visitor::TypeJSCreateLiteralRegExp(Node* node) {
+ return Type::OtherObject();
}
@@ -1232,819 +1256,253 @@
}
-Bounds Typer::Visitor::TypeJSLoadProperty(Node* node) {
+Type* Typer::Visitor::TypeJSLoadProperty(Node* node) {
return TypeBinaryOp(node, JSLoadPropertyTyper);
}
-Bounds Typer::Visitor::TypeJSLoadNamed(Node* node) {
- return Bounds::Unbounded(zone());
+Type* Typer::Visitor::TypeJSLoadNamed(Node* node) {
+ Factory* const f = isolate()->factory();
+ Handle<Name> name = NamedAccessOf(node->op()).name();
+ if (name.is_identical_to(f->prototype_string())) {
+ Type* receiver = Operand(node, 0);
+ if (receiver->Is(Type::None())) return Type::None();
+ if (receiver->IsConstant() &&
+ receiver->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(receiver->AsConstant()->Value());
+ if (function->has_prototype()) {
+ // We need to add a code dependency on the initial map of the {function}
+ // in order to be notified about changes to "prototype" of {function},
+ // so we can only infer a constant type if deoptimization is enabled.
+ if (flags() & kDeoptimizationEnabled) {
+ JSFunction::EnsureHasInitialMap(function);
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+ return Type::Constant(handle(initial_map->prototype(), isolate()),
+ zone());
+ }
+ }
+ } else if (receiver->IsClass() &&
+ receiver->AsClass()->Map()->IsJSFunctionMap()) {
+ Handle<Map> map = receiver->AsClass()->Map();
+ return map->has_non_instance_prototype() ? Type::Primitive(zone())
+ : Type::Receiver(zone());
+ }
+ }
+ return Type::Any();
}
+Type* Typer::Visitor::TypeJSLoadGlobal(Node* node) { return Type::Any(); }
+
+
// Returns a somewhat larger range if we previously assigned
// a (smaller) range to this node. This is used to speed up
// the fixpoint calculation in case there appears to be a loop
// in the graph. In the current implementation, we are
// increasing the limits to the closest power of two.
-Type* Typer::Visitor::Weaken(Type* current_type, Type* previous_type) {
- Type::RangeType* previous = previous_type->GetRange();
- Type::RangeType* current = current_type->GetRange();
- if (previous != NULL && current != NULL) {
- double current_min = current->Min()->Number();
- Handle<Object> new_min = current->Min();
+Type* Typer::Visitor::Weaken(Node* node, Type* current_type,
+ Type* previous_type) {
+ static const double kWeakenMinLimits[] = {
+ 0.0, -1073741824.0, -2147483648.0, -4294967296.0, -8589934592.0,
+ -17179869184.0, -34359738368.0, -68719476736.0, -137438953472.0,
+ -274877906944.0, -549755813888.0, -1099511627776.0, -2199023255552.0,
+ -4398046511104.0, -8796093022208.0, -17592186044416.0, -35184372088832.0,
+ -70368744177664.0, -140737488355328.0, -281474976710656.0,
+ -562949953421312.0};
+ static const double kWeakenMaxLimits[] = {
+ 0.0, 1073741823.0, 2147483647.0, 4294967295.0, 8589934591.0,
+ 17179869183.0, 34359738367.0, 68719476735.0, 137438953471.0,
+ 274877906943.0, 549755813887.0, 1099511627775.0, 2199023255551.0,
+ 4398046511103.0, 8796093022207.0, 17592186044415.0, 35184372088831.0,
+ 70368744177663.0, 140737488355327.0, 281474976710655.0,
+ 562949953421311.0};
+ STATIC_ASSERT(arraysize(kWeakenMinLimits) == arraysize(kWeakenMaxLimits));
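+  // The limits above are successive powers of two, so a range that keeps
+  // growing is widened to the next limit and the fixpoint iteration
+  // converges after at most arraysize(kWeakenMinLimits) weakening steps.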
- // Find the closest lower entry in the list of allowed
- // minima (or negative infinity if there is no such entry).
- if (current_min != previous->Min()->Number()) {
- new_min = typer_->integer->AsRange()->Min();
- for (const auto val : typer_->weaken_min_limits_) {
- if (val->Number() <= current_min) {
- new_min = val;
- break;
- }
- }
- }
-
- double current_max = current->Max()->Number();
- Handle<Object> new_max = current->Max();
- // Find the closest greater entry in the list of allowed
- // maxima (or infinity if there is no such entry).
- if (current_max != previous->Max()->Number()) {
- new_max = typer_->integer->AsRange()->Max();
- for (const auto val : typer_->weaken_max_limits_) {
- if (val->Number() >= current_max) {
- new_max = val;
- break;
- }
- }
- }
-
- return Type::Union(current_type,
- Type::Range(new_min, new_max, typer_->zone()),
- typer_->zone());
+  // If the types have nothing to do with integers, return the current type
+  // unchanged.
+ Type* const integer = typer_->cache_.kInteger;
+ if (!previous_type->Maybe(integer)) {
+ return current_type;
}
- return current_type;
+ DCHECK(current_type->Maybe(integer));
+
+ Type* current_integer = Type::Intersect(current_type, integer, zone());
+ Type* previous_integer = Type::Intersect(previous_type, integer, zone());
+
+ // Once we start weakening a node, we should always weaken.
+ if (!IsWeakened(node->id())) {
+ // Only weaken if there is range involved; we should converge quickly
+ // for all other types (the exception is a union of many constants,
+ // but we currently do not increase the number of constants in unions).
+ Type::RangeType* previous = previous_integer->GetRange();
+ Type::RangeType* current = current_integer->GetRange();
+ if (current == nullptr || previous == nullptr) {
+ return current_type;
+ }
+ // Range is involved => we are weakening.
+ SetWeakened(node->id());
+ }
+
+ double current_min = current_integer->Min();
+ double new_min = current_min;
+ // Find the closest lower entry in the list of allowed
+ // minima (or negative infinity if there is no such entry).
+ if (current_min != previous_integer->Min()) {
+ new_min = -V8_INFINITY;
+ for (double const min : kWeakenMinLimits) {
+ if (min <= current_min) {
+ new_min = min;
+ break;
+ }
+ }
+ }
+
+ double current_max = current_integer->Max();
+ double new_max = current_max;
+ // Find the closest greater entry in the list of allowed
+ // maxima (or infinity if there is no such entry).
+ if (current_max != previous_integer->Max()) {
+ new_max = V8_INFINITY;
+ for (double const max : kWeakenMaxLimits) {
+ if (max >= current_max) {
+ new_max = max;
+ break;
+ }
+ }
+ }
+
+ return Type::Union(current_type,
+ Type::Range(new_min, new_max, typer_->zone()),
+ typer_->zone());
}
-Bounds Typer::Visitor::TypeJSStoreProperty(Node* node) {
+Type* Typer::Visitor::TypeJSStoreProperty(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeJSStoreNamed(Node* node) {
+Type* Typer::Visitor::TypeJSStoreNamed(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeJSDeleteProperty(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeJSStoreGlobal(Node* node) {
+ UNREACHABLE();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeJSHasProperty(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
+ return Type::Boolean(zone());
}
-Bounds Typer::Visitor::TypeJSInstanceOf(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeJSHasProperty(Node* node) {
+ return Type::Boolean(zone());
+}
+
+
+Type* Typer::Visitor::TypeJSInstanceOf(Node* node) {
+ return Type::Boolean(zone());
}
// JS context operators.
-Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
- Bounds outer = Operand(node, 0);
- Type* context_type = outer.upper;
- if (context_type->Is(Type::None())) {
- // Upper bound of context is not yet known.
- return Bounds(Type::None(), Type::Any());
+Type* Typer::Visitor::TypeJSLoadContext(Node* node) {
+ ContextAccess const& access = ContextAccessOf(node->op());
+ if (access.index() == Context::EXTENSION_INDEX) {
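+    // The extension slot of a context always holds a heap object, never a
+    // Smi, hence the TaggedPointer representation.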
+ return Type::TaggedPointer();
}
-
- DCHECK(context_type->Maybe(Type::Internal()));
- // TODO(rossberg): More precisely, instead of the above assertion, we should
- // back-propagate the constraint that it has to be a subtype of Internal.
-
- ContextAccess access = OpParameter<ContextAccess>(node);
- MaybeHandle<Context> context;
- if (context_type->IsConstant()) {
- context = Handle<Context>::cast(context_type->AsConstant()->Value());
- }
- // Walk context chain (as far as known), mirroring dynamic lookup.
- // Since contexts are mutable, the information is only useful as a lower
- // bound.
- // TODO(rossberg): Could use scope info to fix upper bounds for constant
- // bindings if we know that this code is never shared.
- for (size_t i = access.depth(); i > 0; --i) {
- if (context_type->IsContext()) {
- context_type = context_type->AsContext()->Outer();
- if (context_type->IsConstant()) {
- context = Handle<Context>::cast(context_type->AsConstant()->Value());
- }
- } else if (!context.is_null()) {
- context = handle(context.ToHandleChecked()->previous(), isolate());
- }
- }
- if (context.is_null()) {
- return Bounds::Unbounded(zone());
- } else {
- Handle<Object> value =
- handle(context.ToHandleChecked()->get(static_cast<int>(access.index())),
- isolate());
- Type* lower = TypeConstant(value);
- return Bounds(lower, Type::Any());
- }
+  // Since contexts are mutable, we just return the top type (Any).
+ return Type::Any();
}
-Bounds Typer::Visitor::TypeJSStoreContext(Node* node) {
+Type* Typer::Visitor::TypeJSStoreContext(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeJSCreateFunctionContext(Node* node) {
- Bounds outer = ContextOperand(node);
- return Bounds(Type::Context(outer.upper, zone()));
+Type* Typer::Visitor::TypeJSLoadDynamic(Node* node) { return Type::Any(); }
+
+
+Type* Typer::Visitor::WrapContextTypeForInput(Node* node) {
+ Type* outer = TypeOrNone(NodeProperties::GetContextInput(node));
+ if (outer->Is(Type::None())) {
+ return Type::None();
+ } else {
+ DCHECK(outer->Maybe(Type::Internal()));
+ return Type::Context(outer, zone());
+ }
}
-Bounds Typer::Visitor::TypeJSCreateCatchContext(Node* node) {
- Bounds outer = ContextOperand(node);
- return Bounds(Type::Context(outer.upper, zone()));
+Type* Typer::Visitor::TypeJSCreateFunctionContext(Node* node) {
+ return WrapContextTypeForInput(node);
}
-Bounds Typer::Visitor::TypeJSCreateWithContext(Node* node) {
- Bounds outer = ContextOperand(node);
- return Bounds(Type::Context(outer.upper, zone()));
+Type* Typer::Visitor::TypeJSCreateCatchContext(Node* node) {
+ return WrapContextTypeForInput(node);
}
-Bounds Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
- Bounds outer = ContextOperand(node);
- return Bounds(Type::Context(outer.upper, zone()));
+Type* Typer::Visitor::TypeJSCreateWithContext(Node* node) {
+ return WrapContextTypeForInput(node);
}
-Bounds Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
+Type* Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
+ return WrapContextTypeForInput(node);
+}
+
+
+Type* Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
// TODO(rossberg): this is probably incorrect
- Bounds outer = ContextOperand(node);
- return Bounds(Type::Context(outer.upper, zone()));
+ return WrapContextTypeForInput(node);
}
-Bounds Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
- Bounds outer = ContextOperand(node);
- return Bounds(Type::Context(outer.upper, zone()));
+Type* Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
+ return WrapContextTypeForInput(node);
}
// JS other operators.
-Bounds Typer::Visitor::TypeJSYield(Node* node) {
- return Bounds::Unbounded(zone());
-}
+Type* Typer::Visitor::TypeJSYield(Node* node) { return Type::Any(); }
-Bounds Typer::Visitor::TypeJSCallConstruct(Node* node) {
- return Bounds(Type::None(), Type::Receiver());
+Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
+ return Type::Receiver();
}
Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
- return fun->IsFunction() ? fun->AsFunction()->Result() : Type::Any();
-}
-
-
-Bounds Typer::Visitor::TypeJSCallFunction(Node* node) {
- return TypeUnaryOp(node, JSCallFunctionTyper); // We ignore argument types.
-}
-
-
-Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
- return Bounds::Unbounded(zone());
-}
-
-
-Bounds Typer::Visitor::TypeJSDebugger(Node* node) {
- return Bounds::Unbounded(zone());
-}
-
-
-// Simplified operators.
-
-
-Bounds Typer::Visitor::TypeAnyToBoolean(Node* node) {
- return TypeUnaryOp(node, ToBoolean);
-}
-
-
-Bounds Typer::Visitor::TypeBooleanNot(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeBooleanToNumber(Node* node) {
- return Bounds(Type::None(zone()), typer_->zero_or_one);
-}
-
-
-Bounds Typer::Visitor::TypeNumberEqual(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeNumberLessThan(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeNumberAdd(Node* node) {
- return Bounds(Type::None(zone()), Type::Number(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeNumberSubtract(Node* node) {
- return Bounds(Type::None(zone()), Type::Number(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeNumberMultiply(Node* node) {
- return Bounds(Type::None(zone()), Type::Number(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeNumberDivide(Node* node) {
- return Bounds(Type::None(zone()), Type::Number(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeNumberModulus(Node* node) {
- return Bounds(Type::None(zone()), Type::Number(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeNumberToInt32(Node* node) {
- return TypeUnaryOp(node, NumberToInt32);
-}
-
-
-Bounds Typer::Visitor::TypeNumberToUint32(Node* node) {
- return TypeUnaryOp(node, NumberToUint32);
-}
-
-
-Bounds Typer::Visitor::TypeReferenceEqual(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeStringEqual(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeStringLessThan(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
-}
-
-
-Bounds Typer::Visitor::TypeStringAdd(Node* node) {
- return Bounds(Type::None(zone()), Type::String(zone()));
-}
-
-
-static Type* ChangeRepresentation(Type* type, Type* rep, Zone* zone) {
- // TODO(neis): Enable when expressible.
- /*
- return Type::Union(
- Type::Intersect(type, Type::Semantic(), zone),
- Type::Intersect(rep, Type::Representation(), zone), zone);
- */
- return type;
-}
-
-
-Bounds Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Signed32()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::UntaggedSigned32(), zone()),
- ChangeRepresentation(arg.upper, Type::UntaggedSigned32(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Unsigned32()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::UntaggedUnsigned32(), zone()),
- ChangeRepresentation(arg.upper, Type::UntaggedUnsigned32(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Number()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::UntaggedFloat64(), zone()),
- ChangeRepresentation(arg.upper, Type::UntaggedFloat64(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Signed32()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::Tagged(), zone()),
- ChangeRepresentation(arg.upper, Type::Tagged(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Unsigned32()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::Tagged(), zone()),
- ChangeRepresentation(arg.upper, Type::Tagged(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): CHECK(arg.upper->Is(Type::Number()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::Tagged(), zone()),
- ChangeRepresentation(arg.upper, Type::Tagged(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeChangeBoolToBit(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::UntaggedBit(), zone()),
- ChangeRepresentation(arg.upper, Type::UntaggedBit(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeChangeBitToBool(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::TaggedPointer(), zone()),
- ChangeRepresentation(arg.upper, Type::TaggedPointer(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeLoadField(Node* node) {
- return Bounds(FieldAccessOf(node->op()).type);
-}
-
-
-Bounds Typer::Visitor::TypeLoadBuffer(Node* node) {
- // TODO(bmeurer): This typing is not yet correct. Since we can still access
- // out of bounds, the type in the general case has to include Undefined.
- switch (BufferAccessOf(node->op()).external_array_type()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return Bounds(typer_->cache_->Get(k##Type));
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
+ if (fun->IsFunction()) {
+ return fun->AsFunction()->Result();
}
- UNREACHABLE();
- return Bounds();
-}
-
-
-Bounds Typer::Visitor::TypeLoadElement(Node* node) {
- return Bounds(ElementAccessOf(node->op()).type);
-}
-
-
-Bounds Typer::Visitor::TypeStoreField(Node* node) {
- UNREACHABLE();
- return Bounds();
-}
-
-
-Bounds Typer::Visitor::TypeStoreBuffer(Node* node) {
- UNREACHABLE();
- return Bounds();
-}
-
-
-Bounds Typer::Visitor::TypeStoreElement(Node* node) {
- UNREACHABLE();
- return Bounds();
-}
-
-
-Bounds Typer::Visitor::TypeObjectIsSmi(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeObjectIsNonNegativeSmi(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-// Machine operators.
-
-Bounds Typer::Visitor::TypeLoad(Node* node) {
- return Bounds::Unbounded(zone());
-}
-
-
-Bounds Typer::Visitor::TypeStore(Node* node) {
- UNREACHABLE();
- return Bounds();
-}
-
-
-Bounds Typer::Visitor::TypeWord32And(Node* node) {
- return Bounds(Type::Integral32());
-}
-
-
-Bounds Typer::Visitor::TypeWord32Or(Node* node) {
- return Bounds(Type::Integral32());
-}
-
-
-Bounds Typer::Visitor::TypeWord32Xor(Node* node) {
- return Bounds(Type::Integral32());
-}
-
-
-Bounds Typer::Visitor::TypeWord32Shl(Node* node) {
- return Bounds(Type::Integral32());
-}
-
-
-Bounds Typer::Visitor::TypeWord32Shr(Node* node) {
- return Bounds(Type::Integral32());
-}
-
-
-Bounds Typer::Visitor::TypeWord32Sar(Node* node) {
- return Bounds(Type::Integral32());
-}
-
-
-Bounds Typer::Visitor::TypeWord32Ror(Node* node) {
- return Bounds(Type::Integral32());
-}
-
-
-Bounds Typer::Visitor::TypeWord32Equal(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeWord64And(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeWord64Or(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeWord64Xor(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeWord64Shl(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeWord64Shr(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeWord64Sar(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeWord64Ror(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeWord64Equal(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeInt32Add(Node* node) {
- return Bounds(Type::Integral32());
-}
-
-
-Bounds Typer::Visitor::TypeInt32AddWithOverflow(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeInt32Sub(Node* node) {
- return Bounds(Type::Integral32());
-}
-
-
-Bounds Typer::Visitor::TypeInt32SubWithOverflow(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeInt32Mul(Node* node) {
- return Bounds(Type::Integral32());
-}
-
-
-Bounds Typer::Visitor::TypeInt32MulHigh(Node* node) {
- return Bounds(Type::Signed32());
-}
-
-
-Bounds Typer::Visitor::TypeInt32Div(Node* node) {
- return Bounds(Type::Integral32());
-}
-
-
-Bounds Typer::Visitor::TypeInt32Mod(Node* node) {
- return Bounds(Type::Integral32());
-}
-
-
-Bounds Typer::Visitor::TypeInt32LessThan(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeInt32LessThanOrEqual(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeUint32Div(Node* node) {
- return Bounds(Type::Unsigned32());
-}
-
-
-Bounds Typer::Visitor::TypeUint32LessThan(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeUint32LessThanOrEqual(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeUint32Mod(Node* node) {
- return Bounds(Type::Unsigned32());
-}
-
-
-Bounds Typer::Visitor::TypeUint32MulHigh(Node* node) {
- return Bounds(Type::Unsigned32());
-}
-
-
-Bounds Typer::Visitor::TypeInt64Add(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeInt64Sub(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeInt64Mul(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeInt64Div(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeInt64Mod(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeInt64LessThan(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeInt64LessThanOrEqual(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeUint64Div(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeUint64LessThan(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeUint64Mod(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
- return Bounds(Type::Intersect(
- Type::Number(), Type::UntaggedFloat64(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeChangeFloat64ToInt32(Node* node) {
- return Bounds(Type::Intersect(
- Type::Signed32(), Type::UntaggedSigned32(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
- return Bounds(Type::Intersect(
- Type::Unsigned32(), Type::UntaggedUnsigned32(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeChangeInt32ToFloat64(Node* node) {
- return Bounds(Type::Intersect(
- Type::Signed32(), Type::UntaggedFloat64(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeChangeInt32ToInt64(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeChangeUint32ToFloat64(Node* node) {
- return Bounds(Type::Intersect(
- Type::Unsigned32(), Type::UntaggedFloat64(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeChangeUint32ToUint64(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
- return Bounds(Type::Intersect(
- Type::Number(), Type::UntaggedFloat32(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeTruncateFloat64ToInt32(Node* node) {
- return Bounds(Type::Intersect(
- Type::Signed32(), Type::UntaggedSigned32(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
- return Bounds(Type::Intersect(
- Type::Signed32(), Type::UntaggedSigned32(), zone()));
-}
-
-
-Bounds Typer::Visitor::TypeFloat64Add(Node* node) {
- return Bounds(Type::Number());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64Sub(Node* node) {
- return Bounds(Type::Number());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64Mul(Node* node) {
- return Bounds(Type::Number());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64Div(Node* node) {
- return Bounds(Type::Number());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64Mod(Node* node) {
- return Bounds(Type::Number());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64Sqrt(Node* node) {
- return Bounds(Type::Number());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64Equal(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64LessThan(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64LessThanOrEqual(Node* node) {
- return Bounds(Type::Boolean());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64Floor(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Bounds(Type::Number());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64Ceil(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Bounds(Type::Number());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Bounds(Type::Number());
-}
-
-
-Bounds Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
- return Bounds(Type::Number());
-}
-
-
-Bounds Typer::Visitor::TypeLoadStackPointer(Node* node) {
- return Bounds(Type::Internal());
-}
-
-
-Bounds Typer::Visitor::TypeCheckedLoad(Node* node) {
- return Bounds::Unbounded(zone());
-}
-
-
-Bounds Typer::Visitor::TypeCheckedStore(Node* node) {
- UNREACHABLE();
- return Bounds();
-}
-
-
-// Heap constants.
-
-
-Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
- if (value->IsJSFunction()) {
- if (JSFunction::cast(*value)->shared()->HasBuiltinFunctionId()) {
- switch (JSFunction::cast(*value)->shared()->builtin_function_id()) {
+ if (fun->IsConstant() && fun->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(fun->AsConstant()->Value());
+ if (function->shared()->HasBuiltinFunctionId()) {
+ switch (function->shared()->builtin_function_id()) {
case kMathRandom:
- return typer_->random_fun_;
+ return Type::OrderedNumber();
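+      // kMathFloor/kMathRound/kMathCeil produce integral results, but may
+      // also yield -0 or propagate NaN.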
case kMathFloor:
- return typer_->weakint_fun1_;
case kMathRound:
- return typer_->weakint_fun1_;
case kMathCeil:
- return typer_->weakint_fun1_;
+ return t->cache_.kIntegerOrMinusZeroOrNaN;
// Unary math functions.
- case kMathAbs: // TODO(rossberg): can't express overloading
+ case kMathAbs:
case kMathLog:
case kMathExp:
case kMathSqrt:
@@ -2055,52 +1513,925 @@
case kMathAsin:
case kMathAtan:
case kMathFround:
- return typer_->cache_->Get(kNumberFunc1);
+ return Type::Number();
// Binary math functions.
case kMathAtan2:
case kMathPow:
case kMathMax:
case kMathMin:
- return typer_->cache_->Get(kNumberFunc2);
+ return Type::Number();
case kMathImul:
- return typer_->cache_->Get(kImulFunc);
+ return Type::Signed32();
case kMathClz32:
- return typer_->cache_->Get(kClz32Func);
+ return t->cache_.kZeroToThirtyTwo;
+ // String functions.
+ case kStringCharAt:
+ case kStringFromCharCode:
+ return Type::String();
+ // Array functions.
+ case kArrayIndexOf:
+ case kArrayLastIndexOf:
+ return Type::Number();
default:
break;
}
- } else if (JSFunction::cast(*value)->IsBuiltin() && !context().is_null()) {
- Handle<Context> native =
- handle(context().ToHandleChecked()->native_context(), isolate());
- if (*value == native->array_buffer_fun()) {
- return typer_->cache_->Get(kArrayBufferFunc);
- } else if (*value == native->int8_array_fun()) {
- return typer_->cache_->Get(kInt8ArrayFunc);
- } else if (*value == native->int16_array_fun()) {
- return typer_->cache_->Get(kInt16ArrayFunc);
- } else if (*value == native->int32_array_fun()) {
- return typer_->cache_->Get(kInt32ArrayFunc);
- } else if (*value == native->uint8_array_fun()) {
- return typer_->cache_->Get(kUint8ArrayFunc);
- } else if (*value == native->uint16_array_fun()) {
- return typer_->cache_->Get(kUint16ArrayFunc);
- } else if (*value == native->uint32_array_fun()) {
- return typer_->cache_->Get(kUint32ArrayFunc);
- } else if (*value == native->float32_array_fun()) {
- return typer_->cache_->Get(kFloat32ArrayFunc);
- } else if (*value == native->float64_array_fun()) {
- return typer_->cache_->Get(kFloat64ArrayFunc);
- }
}
- } else if (value->IsJSTypedArray()) {
+ }
+ return Type::Any();
+}
+
+
+Type* Typer::Visitor::TypeJSCallFunction(Node* node) {
+  // TODO(bmeurer): We could infer better types if we didn't ignore the
+  // argument types in the JSCallFunctionTyper above.
+ return TypeUnaryOp(node, JSCallFunctionTyper);
+}
+
+
+Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
+ switch (CallRuntimeParametersOf(node->op()).id()) {
+ case Runtime::kInlineIsSmi:
+ case Runtime::kInlineIsArray:
+ case Runtime::kInlineIsDate:
+ case Runtime::kInlineIsTypedArray:
+ case Runtime::kInlineIsMinusZero:
+ case Runtime::kInlineIsFunction:
+ case Runtime::kInlineIsRegExp:
+ case Runtime::kInlineIsJSReceiver:
+ return Type::Boolean(zone());
+ case Runtime::kInlineDoubleLo:
+ case Runtime::kInlineDoubleHi:
+ return Type::Signed32();
+ case Runtime::kInlineConstructDouble:
+ case Runtime::kInlineMathFloor:
+ case Runtime::kInlineMathSqrt:
+ case Runtime::kInlineMathAcos:
+ case Runtime::kInlineMathAsin:
+ case Runtime::kInlineMathAtan:
+ case Runtime::kInlineMathAtan2:
+ return Type::Number();
+ case Runtime::kInlineMathClz32:
+ return Type::Range(0, 32, zone());
+ case Runtime::kInlineCreateIterResultObject:
+ case Runtime::kInlineRegExpConstructResult:
+ return Type::OtherObject();
+ case Runtime::kInlineSubString:
+ return Type::String();
+ case Runtime::kInlineToInteger:
+ return TypeUnaryOp(node, ToInteger);
+ case Runtime::kInlineToLength:
+ return TypeUnaryOp(node, ToLength);
+ case Runtime::kInlineToName:
+ return TypeUnaryOp(node, ToName);
+ case Runtime::kInlineToNumber:
+ return TypeUnaryOp(node, ToNumber);
+ case Runtime::kInlineToObject:
+ return TypeUnaryOp(node, ToObject);
+ case Runtime::kInlineToPrimitive:
+ case Runtime::kInlineToPrimitive_Number:
+ case Runtime::kInlineToPrimitive_String:
+ return TypeUnaryOp(node, ToPrimitive);
+ case Runtime::kInlineToString:
+ return TypeUnaryOp(node, ToString);
+ case Runtime::kHasInPrototypeChain:
+ return Type::Boolean();
+ default:
+ break;
+ }
+ return Type::Any();
+}
+
+
+Type* Typer::Visitor::TypeJSConvertReceiver(Node* node) {
+ return Type::Receiver();
+}
+
+
+Type* Typer::Visitor::TypeJSForInNext(Node* node) {
+ return Type::Union(Type::Name(), Type::Undefined(), zone());
+}
+
+
+Type* Typer::Visitor::TypeJSForInPrepare(Node* node) {
+ // TODO(bmeurer): Return a tuple type here.
+ return Type::Any();
+}
+
+
+Type* Typer::Visitor::TypeJSForInDone(Node* node) {
+ return Type::Boolean(zone());
+}
+
+
+Type* Typer::Visitor::TypeJSForInStep(Node* node) {
+ STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
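+  // ForInStep increments the loop index, which is bounded by the length of
+  // the enum cache backing store, hence the range [1, kMaxLength + 1].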
+ return Type::Range(1, FixedArray::kMaxLength + 1, zone());
+}
+
+
+Type* Typer::Visitor::TypeJSLoadMessage(Node* node) { return Type::Any(); }
+
+
+Type* Typer::Visitor::TypeJSStoreMessage(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
+
+
+// Simplified operators.
+
+
+Type* Typer::Visitor::TypeBooleanNot(Node* node) {
+ return Type::Boolean(zone());
+}
+
+
+Type* Typer::Visitor::TypeBooleanToNumber(Node* node) {
+ return TypeUnaryOp(node, ToNumber);
+}
+
+
+Type* Typer::Visitor::TypeNumberEqual(Node* node) {
+ return Type::Boolean(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberLessThan(Node* node) {
+ return Type::Boolean(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
+ return Type::Boolean(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberAdd(Node* node) { return Type::Number(zone()); }
+
+
+Type* Typer::Visitor::TypeNumberSubtract(Node* node) {
+ return Type::Number(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberMultiply(Node* node) {
+ return Type::Number(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberDivide(Node* node) {
+ return Type::Number(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberModulus(Node* node) {
+ return Type::Number(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberBitwiseOr(Node* node) {
+ return Type::Signed32(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberBitwiseXor(Node* node) {
+ return Type::Signed32(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberBitwiseAnd(Node* node) {
+ return Type::Signed32(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberShiftLeft(Node* node) {
+ return Type::Signed32(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberShiftRight(Node* node) {
+ return Type::Signed32(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberShiftRightLogical(Node* node) {
+ return Type::Unsigned32(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberToInt32(Node* node) {
+ return TypeUnaryOp(node, NumberToInt32);
+}
+
+
+Type* Typer::Visitor::TypeNumberToUint32(Node* node) {
+ return TypeUnaryOp(node, NumberToUint32);
+}
+
+
+Type* Typer::Visitor::TypeNumberIsHoleNaN(Node* node) {
+ return Type::Boolean(zone());
+}
+
+
+Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
+ return TypeUnaryOp(node, ToNumber);
+}
+
+
+// static
+Type* Typer::Visitor::ReferenceEqualTyper(Type* lhs, Type* rhs, Typer* t) {
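+  // If {lhs} is a single value and {rhs} can only be that same value, the
+  // references must be equal.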
+ if (lhs->IsConstant() && rhs->Is(lhs)) {
+ return t->singleton_true_;
+ }
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::TypeReferenceEqual(Node* node) {
+ return TypeBinaryOp(node, ReferenceEqualTyper);
+}
+
+
+Type* Typer::Visitor::TypeStringEqual(Node* node) {
+ return Type::Boolean(zone());
+}
+
+
+Type* Typer::Visitor::TypeStringLessThan(Node* node) {
+ return Type::Boolean(zone());
+}
+
+
+Type* Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
+ return Type::Boolean(zone());
+}
+
+
+namespace {
+
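+// Combines the semantic part of {type} with the representation part of
+// {rep}.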
+Type* ChangeRepresentation(Type* type, Type* rep, Zone* zone) {
+ return Type::Union(Type::Semantic(type, zone),
+ Type::Representation(rep, zone), zone);
+}
+
+} // namespace
+
+
+Type* Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
+ return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Unsigned32()));
+ return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Number()));
+ return ChangeRepresentation(arg, Type::UntaggedFloat64(), zone());
+}
+
+
+Type* Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
+ Type* rep =
+ arg->Is(Type::SignedSmall()) ? Type::TaggedSigned() : Type::Tagged();
+ return ChangeRepresentation(arg, rep, zone());
+}
+
+
+Type* Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Unsigned32()));
+ return ChangeRepresentation(arg, Type::Tagged(), zone());
+}
+
+
+Type* Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
+ Type* arg = Operand(node, 0);
+  // TODO(neis): CHECK(arg->Is(Type::Number()));
+ return ChangeRepresentation(arg, Type::Tagged(), zone());
+}
+
+
+Type* Typer::Visitor::TypeChangeBoolToBit(Node* node) {
+ Type* arg = Operand(node, 0);
+  // TODO(neis): DCHECK(arg->Is(Type::Boolean()));
+ return ChangeRepresentation(arg, Type::UntaggedBit(), zone());
+}
+
+
+Type* Typer::Visitor::TypeChangeBitToBool(Node* node) {
+ Type* arg = Operand(node, 0);
+  // TODO(neis): DCHECK(arg->Is(Type::Boolean()));
+ return ChangeRepresentation(arg, Type::TaggedPointer(), zone());
+}
+
+
+Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::TaggedPointer(); }
+
+
+namespace {
+
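+// Extracts the map of a Constant or Class type, provided that map is stable.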
+MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
+ if (object_type->IsConstant() &&
+ object_type->AsConstant()->Value()->IsHeapObject()) {
+ Handle<Map> object_map(
+ Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
+ if (object_map->is_stable()) return object_map;
+ } else if (object_type->IsClass()) {
+ Handle<Map> object_map = object_type->AsClass()->Map();
+ if (object_map->is_stable()) return object_map;
+ }
+ return MaybeHandle<Map>();
+}
+
+} // namespace
+
+
+Type* Typer::Visitor::TypeLoadField(Node* node) {
+ FieldAccess const& access = FieldAccessOf(node->op());
+ if (access.base_is_tagged == kTaggedBase &&
+ access.offset == HeapObject::kMapOffset) {
+ // The type of LoadField[Map](o) is Constant(map) if map is stable and
+ // either
+ // (a) o has type Constant(object) and map == object->map, or
+ // (b) o has type Class(map),
+ // and either
+ // (1) map cannot transition further, or
+ // (2) deoptimization is enabled and we can add a code dependency on the
+ // stability of map (to guard the Constant type information).
+ Type* const object = Operand(node, 0);
+ if (object->Is(Type::None())) return Type::None();
+ Handle<Map> object_map;
+ if (GetStableMapFromObjectType(object).ToHandle(&object_map)) {
+ if (object_map->CanTransition()) {
+ if (flags() & kDeoptimizationEnabled) {
+ dependencies()->AssumeMapStable(object_map);
+ } else {
+ return access.type;
+ }
+ }
+ Type* object_map_type = Type::Constant(object_map, zone());
+ DCHECK(object_map_type->Is(access.type));
+ return object_map_type;
+ }
+ }
+ return access.type;
+}
+
+
+Type* Typer::Visitor::TypeLoadBuffer(Node* node) {
+ // TODO(bmeurer): This typing is not yet correct. Since we can still access
+ // out of bounds, the type in the general case has to include Undefined.
+ switch (BufferAccessOf(node->op()).external_array_type()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return typer_->cache_.k##Type;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+Type* Typer::Visitor::TypeLoadElement(Node* node) {
+ return ElementAccessOf(node->op()).type;
+}
+
+
+Type* Typer::Visitor::TypeStoreField(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+Type* Typer::Visitor::TypeStoreBuffer(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+Type* Typer::Visitor::TypeStoreElement(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+Type* Typer::Visitor::TypeObjectIsNumber(Node* node) {
+ Type* arg = Operand(node, 0);
+ if (arg->Is(Type::None())) return Type::None();
+ if (arg->Is(Type::Number())) return typer_->singleton_true_;
+ if (!arg->Maybe(Type::Number())) return typer_->singleton_false_;
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::TypeObjectIsSmi(Node* node) {
+ Type* arg = Operand(node, 0);
+ if (arg->Is(Type::None())) return Type::None();
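+  // TaggedSigned values are Smis by definition; TaggedPointer values never
+  // are.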
+ if (arg->Is(Type::TaggedSigned())) return typer_->singleton_true_;
+ if (arg->Is(Type::TaggedPointer())) return typer_->singleton_false_;
+ return Type::Boolean();
+}
+
+
+// Machine operators.
+
+Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
+
+
+Type* Typer::Visitor::TypeStore(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+Type* Typer::Visitor::TypeWord32And(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeWord32Or(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeWord32Xor(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeWord32Shl(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeWord32Shr(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeWord32Sar(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeWord32Ror(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeWord32Equal(Node* node) { return Type::Boolean(); }
+
+
+Type* Typer::Visitor::TypeWord32Clz(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeWord32Ctz(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeWord32Popcnt(Node* node) {
+ return Type::Integral32();
+}
+
+
+Type* Typer::Visitor::TypeWord64And(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Or(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Xor(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Shl(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Shr(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Sar(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Ror(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Clz(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Ctz(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Popcnt(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Equal(Node* node) { return Type::Boolean(); }
+
+
+Type* Typer::Visitor::TypeInt32Add(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeInt32AddWithOverflow(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeInt32Sub(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeInt32SubWithOverflow(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeInt32Mul(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeInt32MulHigh(Node* node) { return Type::Signed32(); }
+
+
+Type* Typer::Visitor::TypeInt32Div(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeInt32Mod(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeInt32LessThan(Node* node) { return Type::Boolean(); }
+
+
+Type* Typer::Visitor::TypeInt32LessThanOrEqual(Node* node) {
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::TypeUint32Div(Node* node) { return Type::Unsigned32(); }
+
+
+Type* Typer::Visitor::TypeUint32LessThan(Node* node) { return Type::Boolean(); }
+
+
+Type* Typer::Visitor::TypeUint32LessThanOrEqual(Node* node) {
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::TypeUint32Mod(Node* node) { return Type::Unsigned32(); }
+
+
+Type* Typer::Visitor::TypeUint32MulHigh(Node* node) {
+ return Type::Unsigned32();
+}
+
+
+Type* Typer::Visitor::TypeInt64Add(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeInt64AddWithOverflow(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeInt64Sub(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeInt64SubWithOverflow(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeInt64Mul(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeInt64Div(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeInt64Mod(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeInt64LessThan(Node* node) { return Type::Boolean(); }
+
+
+Type* Typer::Visitor::TypeInt64LessThanOrEqual(Node* node) {
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::TypeUint64Div(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeUint64LessThan(Node* node) { return Type::Boolean(); }
+
+
+Type* Typer::Visitor::TypeUint64LessThanOrEqual(Node* node) {
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::TypeUint64Mod(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
+ return Type::Intersect(Type::Number(), Type::UntaggedFloat64(), zone());
+}
+
+
+Type* Typer::Visitor::TypeChangeFloat64ToInt32(Node* node) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
+ return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
+ zone());
+}
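+
+// Editorial note (assumption about the typing convention): these Change*
+// typers intersect a semantic bound with a representation bound, so e.g.
+// ChangeFloat64ToInt32 produces a value that is both a 32-bit signed number
+// and carried in an untagged 32-bit integer representation:
+//
+//   Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());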
+
+
+Type* Typer::Visitor::TypeTryTruncateFloat32ToInt64(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeTryTruncateFloat64ToInt64(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeTryTruncateFloat32ToUint64(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeTryTruncateFloat64ToUint64(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeChangeInt32ToFloat64(Node* node) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedFloat64(), zone());
+}
+
+
+Type* Typer::Visitor::TypeChangeInt32ToInt64(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeChangeUint32ToFloat64(Node* node) {
+ return Type::Intersect(Type::Unsigned32(), Type::UntaggedFloat64(), zone());
+}
+
+
+Type* Typer::Visitor::TypeChangeUint32ToUint64(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
+ return Type::Intersect(Type::Number(), Type::UntaggedFloat32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeTruncateFloat64ToInt32(Node* node) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeRoundInt64ToFloat32(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeRoundInt64ToFloat64(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat64(), zone());
+}
+
+
+Type* Typer::Visitor::TypeRoundUint64ToFloat32(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeRoundUint64ToFloat64(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat64(), zone());
+}
+
+
+Type* Typer::Visitor::TypeBitcastFloat32ToInt32(Node* node) {
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeBitcastFloat64ToInt64(Node* node) {
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeBitcastInt32ToFloat32(Node* node) {
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeBitcastInt64ToFloat64(Node* node) {
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat32Add(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat32Sub(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat32Div(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat32Max(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat32Min(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat32Abs(Node* node) {
+ // TODO(turbofan): We should be able to infer a better type here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat32Sqrt(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat32Equal(Node* node) { return Type::Boolean(); }
+
+
+Type* Typer::Visitor::TypeFloat32LessThan(Node* node) {
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::TypeFloat32LessThanOrEqual(Node* node) {
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::TypeFloat64Add(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat64Sub(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat64Mul(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat64Div(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat64Mod(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat64Max(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat64Min(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat64Abs(Node* node) {
+ // TODO(turbofan): We should be able to infer a better type here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat64Sqrt(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat64Equal(Node* node) { return Type::Boolean(); }
+
+
+Type* Typer::Visitor::TypeFloat64LessThan(Node* node) {
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::TypeFloat64LessThanOrEqual(Node* node) {
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::TypeFloat32RoundDown(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat64RoundDown(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat32RoundUp(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat64RoundUp(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat32RoundTruncate(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat32RoundTiesEven(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat64RoundTiesEven(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat64ExtractLowWord32(Node* node) {
+ return Type::Signed32();
+}
+
+
+Type* Typer::Visitor::TypeFloat64ExtractHighWord32(Node* node) {
+ return Type::Signed32();
+}
+
+
+Type* Typer::Visitor::TypeFloat64InsertLowWord32(Node* node) {
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat64InsertHighWord32(Node* node) {
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeLoadStackPointer(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeLoadFramePointer(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
+
+
+Type* Typer::Visitor::TypeCheckedStore(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+// Heap constants.
+
+
+Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
+ if (value->IsJSTypedArray()) {
switch (JSTypedArray::cast(*value)->type()) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return typer_->cache_->Get(k##Type##Array);
+ return typer_->cache_.k##Type##Array;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
}
+ if (Type::IsInteger(*value)) {
+ return Type::Range(value->Number(), value->Number(), zone());
+ }
return Type::Constant(value, zone());
}
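+
+// Editorial sketch (hypothetical usage): with the hunk above, an
+// integer-valued constant is now typed as a degenerate range instead of an
+// opaque constant, which lets range-based arithmetic typing see through it:
+//
+//   Handle<Object> three = isolate->factory()->NewNumber(3.0);
+//   Type* t = TypeConstant(three);  // Range(3, 3) after this change.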
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
index b65a9a5..4177026 100644
--- a/src/compiler/typer.h
+++ b/src/compiler/typer.h
@@ -5,70 +5,70 @@
#ifndef V8_COMPILER_TYPER_H_
#define V8_COMPILER_TYPER_H_
-#include "src/v8.h"
-
+#include "src/base/flags.h"
#include "src/compiler/graph.h"
-#include "src/compiler/opcodes.h"
#include "src/types.h"
namespace v8 {
namespace internal {
-namespace compiler {
// Forward declarations.
-class LazyTypeCache;
+class CompilationDependencies;
+class TypeCache;
+
+namespace compiler {
class Typer {
public:
- explicit Typer(Graph* graph, MaybeHandle<Context> context);
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ Typer(Isolate* isolate, Graph* graph, Flags flags = kNoFlags,
+ CompilationDependencies* dependencies = nullptr,
+ Type::FunctionType* function_type = nullptr);
~Typer();
void Run();
-
- Graph* graph() { return graph_; }
- MaybeHandle<Context> context() { return context_; }
- Zone* zone() { return graph_->zone(); }
- Isolate* isolate() { return zone()->isolate(); }
+ // TODO(bmeurer,jarin): Remove this once we have a notion of "roots" on Graph.
+ void Run(const ZoneVector<Node*>& roots);
private:
class Visitor;
class Decorator;
- Graph* graph_;
- MaybeHandle<Context> context_;
+ Graph* graph() const { return graph_; }
+ Zone* zone() const { return graph()->zone(); }
+ Isolate* isolate() const { return isolate_; }
+ Flags flags() const { return flags_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Type::FunctionType* function_type() const { return function_type_; }
+
+ Isolate* const isolate_;
+ Graph* const graph_;
+ Flags const flags_;
+ CompilationDependencies* const dependencies_;
+ Type::FunctionType* function_type_;
Decorator* decorator_;
+ TypeCache const& cache_;
- Zone* zone_;
- Type* boolean_or_number;
- Type* undefined_or_null;
- Type* undefined_or_number;
- Type* negative_signed32;
- Type* non_negative_signed32;
- Type* singleton_false;
- Type* singleton_true;
- Type* singleton_zero;
- Type* singleton_one;
- Type* zero_or_one;
- Type* zeroish;
- Type* signed32ish;
- Type* unsigned32ish;
- Type* falsish;
- Type* truish;
- Type* integer;
- Type* weakint;
- Type* number_fun0_;
- Type* number_fun1_;
- Type* number_fun2_;
- Type* weakint_fun1_;
- Type* random_fun_;
- LazyTypeCache* cache_;
+ Type* singleton_false_;
+ Type* singleton_true_;
+ Type* singleton_the_hole_;
+ Type* signed32ish_;
+ Type* unsigned32ish_;
+ Type* falsish_;
+ Type* truish_;
- ZoneVector<Handle<Object> > weaken_min_limits_;
- ZoneVector<Handle<Object> > weaken_max_limits_;
DISALLOW_COPY_AND_ASSIGN(Typer);
};
+DEFINE_OPERATORS_FOR_FLAGS(Typer::Flags)
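+
+// Editorial sketch (hypothetical usage, not part of the header): a pipeline
+// that is allowed to deoptimize would construct the typer roughly like so:
+//
+//   Typer typer(isolate, graph, Typer::kDeoptimizationEnabled, dependencies);
+//   typer.Run();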
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/value-numbering-reducer.cc b/src/compiler/value-numbering-reducer.cc
index 734b3e8..555570d 100644
--- a/src/compiler/value-numbering-reducer.cc
+++ b/src/compiler/value-numbering-reducer.cc
@@ -50,7 +50,7 @@
Reduction ValueNumberingReducer::Reduce(Node* node) {
- if (!node->op()->HasProperty(Operator::kEliminatable)) return NoChange();
+ if (!node->op()->HasProperty(Operator::kIdempotent)) return NoChange();
const size_t hash = HashCode(node);
if (!entries_) {
@@ -135,7 +135,7 @@
Node** const old_entries = entries_;
size_t const old_capacity = capacity_;
capacity_ *= kCapacityToSizeRatio;
- entries_ = zone()->NewArray<Node*>(static_cast<int>(capacity_));
+ entries_ = zone()->NewArray<Node*>(capacity_);
memset(entries_, 0, sizeof(*entries_) * capacity_);
size_ = 0;
size_t const mask = capacity_ - 1;
diff --git a/src/compiler/value-numbering-reducer.h b/src/compiler/value-numbering-reducer.h
index 546226c..822b607 100644
--- a/src/compiler/value-numbering-reducer.h
+++ b/src/compiler/value-numbering-reducer.h
@@ -11,12 +11,12 @@
namespace internal {
namespace compiler {
-class ValueNumberingReducer FINAL : public Reducer {
+class ValueNumberingReducer final : public Reducer {
public:
explicit ValueNumberingReducer(Zone* zone);
~ValueNumberingReducer();
- Reduction Reduce(Node* node) OVERRIDE;
+ Reduction Reduce(Node* node) override;
private:
enum { kInitialCapacity = 256u, kCapacityToSizeRatio = 2u };
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index 693b414..1a3ef8e 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -4,23 +4,25 @@
#include "src/compiler/verifier.h"
+#include <algorithm>
#include <deque>
#include <queue>
#include <sstream>
#include <string>
#include "src/bit-vector.h"
-#include "src/compiler/generic-algorithm.h"
-#include "src/compiler/graph-inl.h"
+#include "src/compiler/all-nodes.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties.h"
#include "src/compiler/schedule.h"
#include "src/compiler/simplified-operator.h"
#include "src/ostreams.h"
+#include "src/types-inl.h"
namespace v8 {
namespace internal {
@@ -28,97 +30,83 @@
static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
- Node::Uses uses = def->uses();
- for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
- if (*it == use) return true;
- }
- return false;
+ auto const uses = def->uses();
+ return std::find(uses.begin(), uses.end(), use) != uses.end();
}
static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
- Node::Inputs inputs = use->inputs();
- for (Node::Inputs::iterator it = inputs.begin(); it != inputs.end(); ++it) {
- if (*it == def) return true;
- }
- return false;
+ auto const inputs = use->inputs();
+ return std::find(inputs.begin(), inputs.end(), def) != inputs.end();
}
-class Verifier::Visitor : public NullNodeVisitor {
+class Verifier::Visitor {
public:
Visitor(Zone* z, Typing typed) : zone(z), typing(typed) {}
- // Fulfills the PreNodeCallback interface.
- void Pre(Node* node);
+ void Check(Node* node);
Zone* zone;
Typing typing;
private:
- // TODO(rossberg): Get rid of these once we got rid of NodeProperties.
- Bounds bounds(Node* node) { return NodeProperties::GetBounds(node); }
- Node* ValueInput(Node* node, int i = 0) {
- return NodeProperties::GetValueInput(node, i);
- }
- FieldAccess Field(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kLoadField ||
- node->opcode() == IrOpcode::kStoreField);
- return OpParameter<FieldAccess>(node);
- }
- ElementAccess Element(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kLoadElement ||
- node->opcode() == IrOpcode::kStoreElement);
- return OpParameter<ElementAccess>(node);
- }
void CheckNotTyped(Node* node) {
if (NodeProperties::IsTyped(node)) {
std::ostringstream str;
- str << "TypeError: node #" << node->opcode() << ":"
- << node->op()->mnemonic() << " should never have a type";
- V8_Fatal(__FILE__, __LINE__, str.str().c_str());
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " should never have a type";
+ FATAL(str.str().c_str());
}
}
void CheckUpperIs(Node* node, Type* type) {
- if (typing == TYPED && !bounds(node).upper->Is(type)) {
+ if (typing == TYPED && !NodeProperties::GetType(node)->Is(type)) {
std::ostringstream str;
- str << "TypeError: node #" << node->opcode() << ":"
- << node->op()->mnemonic() << " upper bound ";
- bounds(node).upper->PrintTo(str);
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " type ";
+ NodeProperties::GetType(node)->PrintTo(str);
str << " is not ";
type->PrintTo(str);
- V8_Fatal(__FILE__, __LINE__, str.str().c_str());
+ FATAL(str.str().c_str());
}
}
void CheckUpperMaybe(Node* node, Type* type) {
- if (typing == TYPED && !bounds(node).upper->Maybe(type)) {
+ if (typing == TYPED && !NodeProperties::GetType(node)->Maybe(type)) {
std::ostringstream str;
- str << "TypeError: node #" << node->opcode() << ":"
- << node->op()->mnemonic() << " upper bound ";
- bounds(node).upper->PrintTo(str);
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " type ";
+ NodeProperties::GetType(node)->PrintTo(str);
str << " must intersect ";
type->PrintTo(str);
- V8_Fatal(__FILE__, __LINE__, str.str().c_str());
+ FATAL(str.str().c_str());
}
}
void CheckValueInputIs(Node* node, int i, Type* type) {
- Node* input = ValueInput(node, i);
- if (typing == TYPED && !bounds(input).upper->Is(type)) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ if (typing == TYPED && !NodeProperties::GetType(input)->Is(type)) {
std::ostringstream str;
- str << "TypeError: node #" << node->opcode() << ":"
- << node->op()->mnemonic() << "(input @" << i << " = "
- << input->opcode() << ":" << input->op()->mnemonic()
- << ") upper bound ";
- bounds(input).upper->PrintTo(str);
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << "(input @" << i << " = " << input->opcode() << ":"
+ << input->op()->mnemonic() << ") type ";
+ NodeProperties::GetType(input)->PrintTo(str);
str << " is not ";
type->PrintTo(str);
- V8_Fatal(__FILE__, __LINE__, str.str().c_str());
+ FATAL(str.str().c_str());
+ }
+ }
+ void CheckOutput(Node* node, Node* use, int count, const char* kind) {
+ if (count <= 0) {
+ std::ostringstream str;
+ str << "GraphError: node #" << node->id() << ":" << *node->op()
+ << " does not produce " << kind << " output used by node #"
+ << use->id() << ":" << *use->op();
+ FATAL(str.str().c_str());
}
}
};
-void Verifier::Visitor::Pre(Node* node) {
+void Verifier::Visitor::Check(Node* node) {
int value_count = node->op()->ValueInputCount();
int context_count = OperatorProperties::GetContextInputCount(node->op());
int frame_state_count =
@@ -132,12 +120,12 @@
CHECK_EQ(input_count, node->InputCount());
// Verify that frame state has been inserted for the nodes that need it.
- if (OperatorProperties::HasFrameStateInput(node->op())) {
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ for (int i = 0; i < frame_state_count; i++) {
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, i);
CHECK(frame_state->opcode() == IrOpcode::kFrameState ||
- // kFrameState uses undefined as a sentinel.
+ // kFrameState uses Start as a sentinel.
(node->opcode() == IrOpcode::kFrameState &&
- frame_state->opcode() == IrOpcode::kHeapConstant));
+ frame_state->opcode() == IrOpcode::kStart));
CHECK(IsDefUseChainLinkPresent(frame_state, node));
CHECK(IsUseDefChainLinkPresent(frame_state, node));
}
@@ -145,7 +133,7 @@
// Verify all value inputs actually produce a value.
for (int i = 0; i < value_count; ++i) {
Node* value = NodeProperties::GetValueInput(node, i);
- CHECK(value->op()->ValueOutputCount() > 0);
+ CheckOutput(value, node, value->op()->ValueOutputCount(), "value");
CHECK(IsDefUseChainLinkPresent(value, node));
CHECK(IsUseDefChainLinkPresent(value, node));
}
@@ -153,7 +141,7 @@
// Verify all context inputs are value nodes.
for (int i = 0; i < context_count; ++i) {
Node* context = NodeProperties::GetContextInput(node);
- CHECK(context->op()->ValueOutputCount() > 0);
+ CheckOutput(context, node, context->op()->ValueOutputCount(), "context");
CHECK(IsDefUseChainLinkPresent(context, node));
CHECK(IsUseDefChainLinkPresent(context, node));
}
@@ -161,7 +149,7 @@
// Verify all effect inputs actually have an effect.
for (int i = 0; i < effect_count; ++i) {
Node* effect = NodeProperties::GetEffectInput(node);
- CHECK(effect->op()->EffectOutputCount() > 0);
+ CheckOutput(effect, node, effect->op()->EffectOutputCount(), "effect");
CHECK(IsDefUseChainLinkPresent(effect, node));
CHECK(IsUseDefChainLinkPresent(effect, node));
}
@@ -169,7 +157,7 @@
// Verify all control inputs are control nodes.
for (int i = 0; i < control_count; ++i) {
Node* control = NodeProperties::GetControlInput(node, i);
- CHECK(control->op()->ControlOutputCount() > 0);
+ CheckOutput(control, node, control->op()->ControlOutputCount(), "control");
CHECK(IsDefUseChainLinkPresent(control, node));
CHECK(IsUseDefChainLinkPresent(control, node));
}
@@ -203,17 +191,18 @@
case IrOpcode::kDead:
// Dead is never connected to the graph.
UNREACHABLE();
+ break;
case IrOpcode::kBranch: {
// Branch uses are IfTrue and IfFalse.
- Node::Uses uses = node->uses();
int count_true = 0, count_false = 0;
- for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
- CHECK((*it)->opcode() == IrOpcode::kIfTrue ||
- (*it)->opcode() == IrOpcode::kIfFalse);
- if ((*it)->opcode() == IrOpcode::kIfTrue) ++count_true;
- if ((*it)->opcode() == IrOpcode::kIfFalse) ++count_false;
+ for (auto use : node->uses()) {
+ CHECK(use->opcode() == IrOpcode::kIfTrue ||
+ use->opcode() == IrOpcode::kIfFalse);
+ if (use->opcode() == IrOpcode::kIfTrue) ++count_true;
+ if (use->opcode() == IrOpcode::kIfFalse) ++count_false;
}
- CHECK(count_true == 1 && count_false == 1);
+ CHECK_EQ(1, count_true);
+ CHECK_EQ(1, count_false);
// Type is empty.
CheckNotTyped(node);
break;
@@ -225,27 +214,99 @@
// Type is empty.
CheckNotTyped(node);
break;
+ case IrOpcode::kIfSuccess: {
+      // IfSuccess and IfException continuations are only allowed on
+      // throwing nodes.
+ Node* input = NodeProperties::GetControlInput(node, 0);
+ CHECK(!input->op()->HasProperty(Operator::kNoThrow));
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
+ }
+ case IrOpcode::kIfException: {
+      // IfSuccess and IfException continuations are only allowed on
+      // throwing nodes.
+ Node* input = NodeProperties::GetControlInput(node, 0);
+ CHECK(!input->op()->HasProperty(Operator::kNoThrow));
+ // Type can be anything.
+ CheckUpperIs(node, Type::Any());
+ break;
+ }
+ case IrOpcode::kSwitch: {
+ // Switch uses are Case and Default.
+ int count_case = 0, count_default = 0;
+ for (auto use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kIfValue: {
+ for (auto user : node->uses()) {
+ if (user != use && user->opcode() == IrOpcode::kIfValue) {
+ CHECK_NE(OpParameter<int32_t>(use->op()),
+ OpParameter<int32_t>(user->op()));
+ }
+ }
+ ++count_case;
+ break;
+ }
+ case IrOpcode::kIfDefault: {
+ ++count_default;
+ break;
+ }
+ default: {
+ V8_Fatal(__FILE__, __LINE__, "Switch #%d illegally used by #%d:%s",
+ node->id(), use->id(), use->op()->mnemonic());
+ break;
+ }
+ }
+ }
+ CHECK_EQ(1, count_default);
+ CHECK_EQ(node->op()->ControlOutputCount(), count_case + count_default);
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
+ }
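+      // Editorial sketch (hypothetical construction): the shape verified
+      // above is one IfDefault plus IfValue uses with pairwise-distinct
+      // values, e.g. for a three-way switch:
+      //
+      //   Node* sw = graph->NewNode(common->Switch(3), index, control);
+      //   Node* c0 = graph->NewNode(common->IfValue(0), sw);
+      //   Node* c1 = graph->NewNode(common->IfValue(1), sw);
+      //   Node* d  = graph->NewNode(common->IfDefault(), sw);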
+ case IrOpcode::kIfValue:
+ case IrOpcode::kIfDefault:
+ CHECK_EQ(IrOpcode::kSwitch,
+ NodeProperties::GetControlInput(node)->opcode());
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
case IrOpcode::kLoop:
case IrOpcode::kMerge:
CHECK_EQ(control_count, input_count);
// Type is empty.
CheckNotTyped(node);
break;
+ case IrOpcode::kDeoptimize:
case IrOpcode::kReturn:
- // TODO(rossberg): check successor is End
- // Type is empty.
- CheckNotTyped(node);
- break;
case IrOpcode::kThrow:
- // TODO(rossberg): what are the constraints on these?
+ // Deoptimize, Return and Throw uses are End.
+ for (auto use : node->uses()) {
+ CHECK_EQ(IrOpcode::kEnd, use->opcode());
+ }
// Type is empty.
CheckNotTyped(node);
break;
case IrOpcode::kTerminate:
+      // Terminate takes one loop control input and one effect input.
+ CHECK_EQ(1, control_count);
+ CHECK_EQ(1, effect_count);
+ CHECK_EQ(2, input_count);
+ CHECK_EQ(IrOpcode::kLoop,
+ NodeProperties::GetControlInput(node)->opcode());
+ // Terminate uses are End.
+ for (auto use : node->uses()) {
+ CHECK_EQ(IrOpcode::kEnd, use->opcode());
+ }
// Type is empty.
CheckNotTyped(node);
+ break;
+ case IrOpcode::kOsrNormalEntry:
+ case IrOpcode::kOsrLoopEntry:
+      // Osr entries take one control input and one effect input.
CHECK_EQ(1, control_count);
- CHECK_EQ(input_count, 1 + effect_count);
+ CHECK_EQ(1, effect_count);
+ CHECK_EQ(2, input_count);
+ // Type is empty.
+ CheckNotTyped(node);
break;
// Common operators
@@ -253,13 +314,13 @@
case IrOpcode::kParameter: {
// Parameters have the start node as inputs.
CHECK_EQ(1, input_count);
- CHECK_EQ(IrOpcode::kStart,
- NodeProperties::GetValueInput(node, 0)->opcode());
// Parameter has an input that produces enough values.
- int index = OpParameter<int>(node);
- Node* input = NodeProperties::GetValueInput(node, 0);
+ int const index = ParameterIndexOf(node->op());
+ Node* const start = NodeProperties::GetValueInput(node, 0);
+ CHECK_EQ(IrOpcode::kStart, start->opcode());
// Currently, parameter indices start at -1 instead of 0.
- CHECK_GT(input->op()->ValueOutputCount(), index + 1);
+ CHECK_LE(-1, index);
+ CHECK_LT(index + 1, start->op()->ValueOutputCount());
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
@@ -297,9 +358,16 @@
// Type is considered internal.
CheckUpperIs(node, Type::Internal());
break;
+ case IrOpcode::kOsrValue:
+ // OSR values have a value and a control input.
+ CHECK_EQ(1, control_count);
+ CHECK_EQ(1, input_count);
+ // Type is merged from other values in the graph and could be any.
+ CheckUpperIs(node, Type::Any());
+ break;
case IrOpcode::kProjection: {
// Projection has an input that produces enough values.
- int index = static_cast<int>(OpParameter<size_t>(node->op()));
+ int index = static_cast<int>(ProjectionIndexOf(node->op()));
Node* input = NodeProperties::GetValueInput(node, 0);
CHECK_GT(input->op()->ValueOutputCount(), index);
// Type can be anything.
@@ -325,9 +393,7 @@
// TODO(rossberg): for now at least, narrowing does not really hold.
/*
for (int i = 0; i < value_count; ++i) {
- // TODO(rossberg, jarin): Figure out what to do about lower bounds.
- // CHECK(bounds(node).lower->Is(bounds(ValueInput(node, i)).lower));
- CHECK(bounds(ValueInput(node, i)).upper->Is(bounds(node).upper));
+ CHECK(type_of(ValueInput(node, i))->Is(type_of(node)));
}
*/
break;
@@ -341,27 +407,45 @@
CHECK_EQ(input_count, 1 + effect_count);
break;
}
- case IrOpcode::kValueEffect:
+ case IrOpcode::kEffectSet: {
+ CHECK_EQ(0, value_count);
+ CHECK_EQ(0, control_count);
+ CHECK_LT(1, effect_count);
+ break;
+ }
+ case IrOpcode::kGuard:
+ // TODO(bmeurer): what are the constraints on these?
+ break;
+ case IrOpcode::kBeginRegion:
// TODO(rossberg): what are the constraints on these?
break;
- case IrOpcode::kFinish: {
+ case IrOpcode::kFinishRegion: {
// TODO(rossberg): what are the constraints on these?
// Type must be subsumed by input type.
if (typing == TYPED) {
- CHECK(bounds(ValueInput(node)).lower->Is(bounds(node).lower));
- CHECK(bounds(ValueInput(node)).upper->Is(bounds(node).upper));
+ Node* val = NodeProperties::GetValueInput(node, 0);
+ CHECK(NodeProperties::GetType(val)->Is(NodeProperties::GetType(node)));
}
break;
}
case IrOpcode::kFrameState:
// TODO(jarin): what are the constraints on these?
+ CHECK_EQ(5, value_count);
+ CHECK_EQ(0, control_count);
+ CHECK_EQ(0, effect_count);
+ CHECK_EQ(6, input_count);
break;
case IrOpcode::kStateValues:
+ case IrOpcode::kObjectState:
+ case IrOpcode::kTypedStateValues:
// TODO(jarin): what are the constraints on these?
break;
case IrOpcode::kCall:
// TODO(rossberg): what are the constraints on these?
break;
+ case IrOpcode::kTailCall:
+ // TODO(bmeurer): what are the constraints on these?
+ break;
// JavaScript operators
// --------------------
@@ -373,7 +457,6 @@
case IrOpcode::kJSGreaterThan:
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSGreaterThanOrEqual:
- case IrOpcode::kJSUnaryNot:
// Type is Boolean.
CheckUpperIs(node, Type::Boolean());
break;
@@ -424,13 +507,37 @@
// Type is Object.
CheckUpperIs(node, Type::Object());
break;
+ case IrOpcode::kJSCreateArguments:
+ // Type is OtherObject.
+ CheckUpperIs(node, Type::OtherObject());
+ break;
+ case IrOpcode::kJSCreateArray:
+ // Type is OtherObject.
+ CheckUpperIs(node, Type::OtherObject());
+ break;
+ case IrOpcode::kJSCreateClosure:
+ // Type is Function.
+ CheckUpperIs(node, Type::Function());
+ break;
+ case IrOpcode::kJSCreateIterResultObject:
+ // Type is OtherObject.
+ CheckUpperIs(node, Type::OtherObject());
+ break;
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
+ // Type is OtherObject.
+ CheckUpperIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSLoadProperty:
case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSLoadGlobal:
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
case IrOpcode::kJSStoreProperty:
case IrOpcode::kJSStoreNamed:
+ case IrOpcode::kJSStoreGlobal:
// Type is empty.
CheckNotTyped(node);
break;
@@ -446,6 +553,7 @@
break;
case IrOpcode::kJSLoadContext:
+ case IrOpcode::kJSLoadDynamic:
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
@@ -464,28 +572,56 @@
// TODO(rossberg): This should really be Is(Internal), but the typer
// currently can't do backwards propagation.
CheckUpperMaybe(context, Type::Internal());
- if (typing == TYPED) CHECK(bounds(node).upper->IsContext());
+ if (typing == TYPED) CHECK(NodeProperties::GetType(node)->IsContext());
break;
}
case IrOpcode::kJSCallConstruct:
+ case IrOpcode::kJSConvertReceiver:
// Type is Receiver.
CheckUpperIs(node, Type::Receiver());
break;
case IrOpcode::kJSCallFunction:
case IrOpcode::kJSCallRuntime:
case IrOpcode::kJSYield:
- case IrOpcode::kJSDebugger:
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
+ case IrOpcode::kJSForInPrepare: {
+      // TODO(bmeurer): What are the constraints on these?
+ CheckUpperIs(node, Type::Any());
+ break;
+ }
+ case IrOpcode::kJSForInDone: {
+ // TODO(bmeurer): OSR breaks this invariant, although the node is not user
+ // visible, so we know it is safe (fullcodegen has an unsigned smi there).
+ // CheckValueInputIs(node, 0, Type::UnsignedSmall());
+ break;
+ }
+ case IrOpcode::kJSForInNext: {
+ CheckUpperIs(node, Type::Union(Type::Name(), Type::Undefined(), zone));
+ break;
+ }
+ case IrOpcode::kJSForInStep: {
+ // TODO(bmeurer): OSR breaks this invariant, although the node is not user
+ // visible, so we know it is safe (fullcodegen has an unsigned smi there).
+ // CheckValueInputIs(node, 0, Type::UnsignedSmall());
+ CheckUpperIs(node, Type::UnsignedSmall());
+ break;
+ }
+
+ case IrOpcode::kJSLoadMessage:
+ case IrOpcode::kJSStoreMessage:
+ break;
+
+ case IrOpcode::kJSStackCheck:
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
+
// Simplified operators
// -------------------------------
- case IrOpcode::kAnyToBoolean:
- // Type is Boolean.
- CheckUpperIs(node, Type::Boolean());
- break;
case IrOpcode::kBooleanNot:
// Boolean -> Boolean
CheckValueInputIs(node, 0, Type::Boolean());
@@ -515,6 +651,27 @@
// TODO(rossberg): activate once we retype after opcode changes.
// CheckUpperIs(node, Type::Number());
break;
+ case IrOpcode::kNumberBitwiseOr:
+ case IrOpcode::kNumberBitwiseXor:
+ case IrOpcode::kNumberBitwiseAnd:
+ // (Signed32, Signed32) -> Signed32
+ CheckValueInputIs(node, 0, Type::Signed32());
+ CheckValueInputIs(node, 1, Type::Signed32());
+ CheckUpperIs(node, Type::Signed32());
+ break;
+ case IrOpcode::kNumberShiftLeft:
+ case IrOpcode::kNumberShiftRight:
+ // (Signed32, Unsigned32) -> Signed32
+ CheckValueInputIs(node, 0, Type::Signed32());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckUpperIs(node, Type::Signed32());
+ break;
+ case IrOpcode::kNumberShiftRightLogical:
+ // (Unsigned32, Unsigned32) -> Unsigned32
+ CheckValueInputIs(node, 0, Type::Unsigned32());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckUpperIs(node, Type::Unsigned32());
+ break;
case IrOpcode::kNumberToInt32:
// Number -> Signed32
CheckValueInputIs(node, 0, Type::Number());
@@ -525,6 +682,16 @@
CheckValueInputIs(node, 0, Type::Number());
CheckUpperIs(node, Type::Unsigned32());
break;
+ case IrOpcode::kNumberIsHoleNaN:
+ // Number -> Boolean
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckUpperIs(node, Type::Boolean());
+ break;
+ case IrOpcode::kPlainPrimitiveToNumber:
+ // PlainPrimitive -> Number
+ CheckValueInputIs(node, 0, Type::PlainPrimitive());
+ CheckUpperIs(node, Type::Number());
+ break;
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual:
@@ -533,29 +700,20 @@
CheckValueInputIs(node, 1, Type::String());
CheckUpperIs(node, Type::Boolean());
break;
- case IrOpcode::kStringAdd:
- // (String, String) -> String
- CheckValueInputIs(node, 0, Type::String());
- CheckValueInputIs(node, 1, Type::String());
- CheckUpperIs(node, Type::String());
- break;
case IrOpcode::kReferenceEqual: {
// (Unique, Any) -> Boolean and
// (Any, Unique) -> Boolean
- if (typing == TYPED) {
- CHECK(bounds(ValueInput(node, 0)).upper->Is(Type::Unique()) ||
- bounds(ValueInput(node, 1)).upper->Is(Type::Unique()));
- }
CheckUpperIs(node, Type::Boolean());
break;
}
+ case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsSmi:
CheckValueInputIs(node, 0, Type::Any());
CheckUpperIs(node, Type::Boolean());
break;
- case IrOpcode::kObjectIsNonNegativeSmi:
- CheckValueInputIs(node, 0, Type::Any());
- CheckUpperIs(node, Type::Boolean());
+ case IrOpcode::kAllocate:
+ CheckValueInputIs(node, 0, Type::PlainNumber());
+ CheckUpperIs(node, Type::TaggedPointer());
break;
case IrOpcode::kChangeTaggedToInt32: {
@@ -635,7 +793,7 @@
// Object -> fieldtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
- // CheckUpperIs(node, Field(node).type));
+ // CheckUpperIs(node, FieldAccessOf(node->op()).type));
break;
case IrOpcode::kLoadBuffer:
break;
@@ -643,13 +801,13 @@
// Object -> elementtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
- // CheckUpperIs(node, Element(node).type));
+ // CheckUpperIs(node, ElementAccessOf(node->op()).type));
break;
case IrOpcode::kStoreField:
// (Object, fieldtype) -> _|_
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
- // CheckValueInputIs(node, 1, Field(node).type));
+ // CheckValueInputIs(node, 1, FieldAccessOf(node->op()).type));
CheckNotTyped(node);
break;
case IrOpcode::kStoreBuffer:
@@ -658,7 +816,7 @@
// (Object, elementtype) -> _|_
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
- // CheckValueInputIs(node, 1, Element(node).type));
+ // CheckValueInputIs(node, 1, ElementAccessOf(node->op()).type));
CheckNotTyped(node);
break;
@@ -674,6 +832,9 @@
case IrOpcode::kWord32Sar:
case IrOpcode::kWord32Ror:
case IrOpcode::kWord32Equal:
+ case IrOpcode::kWord32Clz:
+ case IrOpcode::kWord32Ctz:
+ case IrOpcode::kWord32Popcnt:
case IrOpcode::kWord64And:
case IrOpcode::kWord64Or:
case IrOpcode::kWord64Xor:
@@ -681,6 +842,9 @@
case IrOpcode::kWord64Shr:
case IrOpcode::kWord64Sar:
case IrOpcode::kWord64Ror:
+ case IrOpcode::kWord64Clz:
+ case IrOpcode::kWord64Popcnt:
+ case IrOpcode::kWord64Ctz:
case IrOpcode::kWord64Equal:
case IrOpcode::kInt32Add:
case IrOpcode::kInt32AddWithOverflow:
@@ -698,7 +862,9 @@
case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
case IrOpcode::kInt64Add:
+ case IrOpcode::kInt64AddWithOverflow:
case IrOpcode::kInt64Sub:
+ case IrOpcode::kInt64SubWithOverflow:
case IrOpcode::kInt64Mul:
case IrOpcode::kInt64Div:
case IrOpcode::kInt64Mod:
@@ -707,22 +873,50 @@
case IrOpcode::kUint64Div:
case IrOpcode::kUint64Mod:
case IrOpcode::kUint64LessThan:
+ case IrOpcode::kUint64LessThanOrEqual:
+ case IrOpcode::kFloat32Add:
+ case IrOpcode::kFloat32Sub:
+ case IrOpcode::kFloat32Mul:
+ case IrOpcode::kFloat32Div:
+ case IrOpcode::kFloat32Max:
+ case IrOpcode::kFloat32Min:
+ case IrOpcode::kFloat32Abs:
+ case IrOpcode::kFloat32Sqrt:
+ case IrOpcode::kFloat32Equal:
+ case IrOpcode::kFloat32LessThan:
+ case IrOpcode::kFloat32LessThanOrEqual:
case IrOpcode::kFloat64Add:
case IrOpcode::kFloat64Sub:
case IrOpcode::kFloat64Mul:
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
+ case IrOpcode::kFloat64Max:
+ case IrOpcode::kFloat64Min:
+ case IrOpcode::kFloat64Abs:
case IrOpcode::kFloat64Sqrt:
- case IrOpcode::kFloat64Floor:
- case IrOpcode::kFloat64Ceil:
+ case IrOpcode::kFloat32RoundDown:
+ case IrOpcode::kFloat64RoundDown:
+ case IrOpcode::kFloat32RoundUp:
+ case IrOpcode::kFloat64RoundUp:
+ case IrOpcode::kFloat32RoundTruncate:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
+ case IrOpcode::kFloat32RoundTiesEven:
+ case IrOpcode::kFloat64RoundTiesEven:
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
case IrOpcode::kTruncateInt64ToInt32:
+ case IrOpcode::kRoundInt64ToFloat32:
+ case IrOpcode::kRoundInt64ToFloat64:
+ case IrOpcode::kRoundUint64ToFloat64:
+ case IrOpcode::kRoundUint64ToFloat32:
case IrOpcode::kTruncateFloat64ToFloat32:
case IrOpcode::kTruncateFloat64ToInt32:
+ case IrOpcode::kBitcastFloat32ToInt32:
+ case IrOpcode::kBitcastFloat64ToInt64:
+ case IrOpcode::kBitcastInt32ToFloat32:
+ case IrOpcode::kBitcastInt64ToFloat64:
case IrOpcode::kChangeInt32ToInt64:
case IrOpcode::kChangeUint32ToUint64:
case IrOpcode::kChangeInt32ToFloat64:
@@ -730,20 +924,46 @@
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ case IrOpcode::kTryTruncateFloat64ToUint64:
+ case IrOpcode::kFloat64ExtractLowWord32:
+ case IrOpcode::kFloat64ExtractHighWord32:
+ case IrOpcode::kFloat64InsertLowWord32:
+ case IrOpcode::kFloat64InsertHighWord32:
case IrOpcode::kLoadStackPointer:
+ case IrOpcode::kLoadFramePointer:
case IrOpcode::kCheckedLoad:
case IrOpcode::kCheckedStore:
// TODO(rossberg): Check.
break;
}
-}
+} // NOLINT(readability/fn_size)
void Verifier::Run(Graph* graph, Typing typing) {
- Visitor visitor(graph->zone(), typing);
- CHECK_NE(NULL, graph->start());
- CHECK_NE(NULL, graph->end());
- graph->VisitNodeInputsFromEnd(&visitor);
+ CHECK_NOT_NULL(graph->start());
+ CHECK_NOT_NULL(graph->end());
+ Zone zone;
+ Visitor visitor(&zone, typing);
+ AllNodes all(&zone, graph);
+ for (Node* node : all.live) visitor.Check(node);
+
+ // Check the uniqueness of projections.
+ for (Node* proj : all.live) {
+ if (proj->opcode() != IrOpcode::kProjection) continue;
+ Node* node = proj->InputAt(0);
+ for (Node* other : node->uses()) {
+ if (all.IsLive(other) && other != proj &&
+ other->opcode() == IrOpcode::kProjection &&
+ ProjectionIndexOf(other->op()) == ProjectionIndexOf(proj->op())) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Node #%d:%s has duplicate projections #%d and #%d",
+ node->id(), node->op()->mnemonic(), proj->id(), other->id());
+ }
+ }
+ }
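+
+  // Editorial sketch (hypothetical, following the conventions above):
+  // projections select one output of a multi-output node, e.g.
+  //
+  //   Node* add = graph->NewNode(machine->Int32AddWithOverflow(), a, b);
+  //   Node* val = graph->NewNode(common->Projection(0), add);
+  //   Node* ovf = graph->NewNode(common->Projection(1), add);
+  //
+  // A second Projection(0) of {add} would be reported as a duplicate here.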
}
@@ -759,7 +979,7 @@
use_pos--;
}
block = block->dominator();
- if (block == NULL) break;
+ if (block == nullptr) break;
use_pos = static_cast<int>(block->NodeCount()) - 1;
if (node == block->control_input()) return true;
}
@@ -770,7 +990,7 @@
static bool Dominates(Schedule* schedule, Node* dominator, Node* dominatee) {
BasicBlock* dom = schedule->block(dominator);
BasicBlock* sub = schedule->block(dominatee);
- while (sub != NULL) {
+ while (sub != nullptr) {
if (sub == dom) {
return true;
}
@@ -793,7 +1013,7 @@
use_pos)) {
V8_Fatal(__FILE__, __LINE__,
"Node #%d:%s in B%d is not dominated by input@%d #%d:%s",
- node->id(), node->op()->mnemonic(), block->id().ToInt(), j,
+ node->id(), node->op()->mnemonic(), block->rpo_number(), j,
input->id(), input->op()->mnemonic());
}
}
@@ -806,8 +1026,8 @@
if (!Dominates(schedule, ctl, node)) {
V8_Fatal(__FILE__, __LINE__,
"Node #%d:%s in B%d is not dominated by control input #%d:%s",
- node->id(), node->op()->mnemonic(), block->id(), ctl->id(),
- ctl->op()->mnemonic());
+ node->id(), node->op()->mnemonic(), block->rpo_number(),
+ ctl->id(), ctl->op()->mnemonic());
}
}
}
@@ -815,7 +1035,7 @@
void ScheduleVerifier::Run(Schedule* schedule) {
const size_t count = schedule->BasicBlockCount();
- Zone tmp_zone(schedule->zone()->isolate());
+ Zone tmp_zone;
Zone* zone = &tmp_zone;
BasicBlock* start = schedule->start();
BasicBlockVector* rpo_order = schedule->rpo_order();
@@ -826,15 +1046,13 @@
++b) {
CHECK_EQ((*b), schedule->GetBlockById((*b)->id()));
// All predecessors and successors should be in rpo and in this schedule.
- for (BasicBlock::Predecessors::iterator j = (*b)->predecessors_begin();
- j != (*b)->predecessors_end(); ++j) {
- CHECK_GE((*j)->rpo_number(), 0);
- CHECK_EQ((*j), schedule->GetBlockById((*j)->id()));
+ for (BasicBlock const* predecessor : (*b)->predecessors()) {
+ CHECK_GE(predecessor->rpo_number(), 0);
+ CHECK_EQ(predecessor, schedule->GetBlockById(predecessor->id()));
}
- for (BasicBlock::Successors::iterator j = (*b)->successors_begin();
- j != (*b)->successors_end(); ++j) {
- CHECK_GE((*j)->rpo_number(), 0);
- CHECK_EQ((*j), schedule->GetBlockById((*j)->id()));
+ for (BasicBlock const* successor : (*b)->successors()) {
+ CHECK_GE(successor->rpo_number(), 0);
+ CHECK_EQ(successor, schedule->GetBlockById(successor->id()));
}
}
@@ -846,10 +1064,10 @@
BasicBlock* dom = block->dominator();
if (b == 0) {
// All blocks except start should have a dominator.
- CHECK_EQ(NULL, dom);
+ CHECK_NULL(dom);
} else {
// Check that the immediate dominator appears somewhere before the block.
- CHECK_NE(NULL, dom);
+ CHECK_NOT_NULL(dom);
CHECK_LT(dom->rpo_number(), block->rpo_number());
}
}
@@ -888,7 +1106,7 @@
{
// Verify the dominance relation.
ZoneVector<BitVector*> dominators(zone);
- dominators.resize(count, NULL);
+ dominators.resize(count, nullptr);
// Compute a set of all the nodes that dominate a given node by using
// a forward fixpoint. O(n^2).
@@ -901,15 +1119,15 @@
queue.pop();
BitVector* block_doms = dominators[block->id().ToSize()];
BasicBlock* idom = block->dominator();
- if (idom != NULL && !block_doms->Contains(idom->id().ToInt())) {
+ if (idom != nullptr && !block_doms->Contains(idom->id().ToInt())) {
V8_Fatal(__FILE__, __LINE__, "Block B%d is not dominated by B%d",
- block->id().ToInt(), idom->id().ToInt());
+ block->rpo_number(), idom->rpo_number());
}
for (size_t s = 0; s < block->SuccessorCount(); s++) {
BasicBlock* succ = block->SuccessorAt(s);
BitVector* succ_doms = dominators[succ->id().ToSize()];
- if (succ_doms == NULL) {
+ if (succ_doms == nullptr) {
// First time visiting the node. S.doms = B U B.doms
succ_doms = new (zone) BitVector(static_cast<int>(count), zone);
succ_doms->CopyFrom(*block_doms);
@@ -931,7 +1149,7 @@
b != rpo_order->end(); ++b) {
BasicBlock* block = *b;
BasicBlock* idom = block->dominator();
- if (idom == NULL) continue;
+ if (idom == nullptr) continue;
BitVector* block_doms = dominators[block->id().ToSize()];
for (BitVector::Iterator it(block_doms); !it.Done(); it.Advance()) {
@@ -941,7 +1159,7 @@
!dominators[idom->id().ToSize()]->Contains(dom->id().ToInt())) {
V8_Fatal(__FILE__, __LINE__,
"Block B%d is not immediately dominated by B%d",
- block->id().ToInt(), idom->id().ToInt());
+ block->rpo_number(), idom->rpo_number());
}
}
}
@@ -971,7 +1189,7 @@
// Check inputs to control for this block.
Node* control = block->control_input();
- if (control != NULL) {
+ if (control != nullptr) {
CHECK_EQ(block, schedule->block(control));
CheckInputsDominate(schedule, block, control,
static_cast<int>(block->NodeCount()) - 1);
@@ -983,6 +1201,68 @@
}
}
}
+
+
+#ifdef DEBUG
+
+// static
+void Verifier::VerifyNode(Node* node) {
+ CHECK_EQ(OperatorProperties::GetTotalInputCount(node->op()),
+ node->InputCount());
+  // If this node has no effect or control outputs,
+  // we check that none of its uses are effect or control inputs.
+ bool check_no_control = node->op()->ControlOutputCount() == 0;
+ bool check_no_effect = node->op()->EffectOutputCount() == 0;
+ bool check_no_frame_state = node->opcode() != IrOpcode::kFrameState;
+ if (check_no_effect || check_no_control) {
+ for (Edge edge : node->use_edges()) {
+ Node* const user = edge.from();
+ CHECK(!user->IsDead());
+ if (NodeProperties::IsControlEdge(edge)) {
+ CHECK(!check_no_control);
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ CHECK(!check_no_effect);
+ } else if (NodeProperties::IsFrameStateEdge(edge)) {
+ CHECK(!check_no_frame_state);
+ }
+ }
+ }
+ // Frame state inputs should be frame states (or sentinels).
+ for (int i = 0; i < OperatorProperties::GetFrameStateInputCount(node->op());
+ i++) {
+ Node* input = NodeProperties::GetFrameStateInput(node, i);
+ CHECK(input->opcode() == IrOpcode::kFrameState ||
+ input->opcode() == IrOpcode::kStart ||
+ input->opcode() == IrOpcode::kDead);
+ }
+ // Effect inputs should be effect-producing nodes (or sentinels).
+ for (int i = 0; i < node->op()->EffectInputCount(); i++) {
+ Node* input = NodeProperties::GetEffectInput(node, i);
+ CHECK(input->op()->EffectOutputCount() > 0 ||
+ input->opcode() == IrOpcode::kDead);
+ }
+ // Control inputs should be control-producing nodes (or sentinels).
+ for (int i = 0; i < node->op()->ControlInputCount(); i++) {
+ Node* input = NodeProperties::GetControlInput(node, i);
+ CHECK(input->op()->ControlOutputCount() > 0 ||
+ input->opcode() == IrOpcode::kDead);
+ }
}
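+
+// Editorial note (assumption about call sites): VerifyNode is the cheap
+// per-node counterpart to the full graph walk and is intended to run right
+// after node creation in debug builds, along the lines of:
+//
+//   Node* node = graph->NewNode(op, value, effect, control);
+//   Verifier::VerifyNode(node);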
+
+
+void Verifier::VerifyEdgeInputReplacement(const Edge& edge,
+ const Node* replacement) {
+ // Check that the user does not misuse the replacement.
+ DCHECK(!NodeProperties::IsControlEdge(edge) ||
+ replacement->op()->ControlOutputCount() > 0);
+ DCHECK(!NodeProperties::IsEffectEdge(edge) ||
+ replacement->op()->EffectOutputCount() > 0);
+ DCHECK(!NodeProperties::IsFrameStateEdge(edge) ||
+ replacement->opcode() == IrOpcode::kFrameState);
}
-} // namespace v8::internal::compiler
+
+#endif // DEBUG
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/verifier.h b/src/compiler/verifier.h
index 67b7ba6..428558d 100644
--- a/src/compiler/verifier.h
+++ b/src/compiler/verifier.h
@@ -5,13 +5,15 @@
#ifndef V8_COMPILER_VERIFIER_H_
#define V8_COMPILER_VERIFIER_H_
-#include "src/v8.h"
+#include "src/base/macros.h"
namespace v8 {
namespace internal {
namespace compiler {
class Graph;
+class Edge;
+class Node;
class Schedule;
// Verifies properties of a graph, such as the well-formedness of inputs to
@@ -22,6 +24,28 @@
static void Run(Graph* graph, Typing typing = TYPED);
+#ifdef DEBUG
+ // Verifies consistency of node inputs and uses:
+ // - node inputs should agree with the input count computed from
+ // the node's operator.
+ // - effect inputs should have effect outputs.
+ // - control inputs should have control outputs.
+ // - frame state inputs should be frame states.
+ // - if the node has control uses, it should produce control.
+ // - if the node has effect uses, it should produce effect.
+ // - if the node has frame state uses, it must be a frame state.
+ static void VerifyNode(Node* node);
+
+ // Verify that {replacement} has the required outputs
+ // (effect, control or frame state) to be used as an input for {edge}.
+ static void VerifyEdgeInputReplacement(const Edge& edge,
+ const Node* replacement);
+#else
+ static void VerifyNode(Node* node) {}
+ static void VerifyEdgeInputReplacement(const Edge& edge,
+ const Node* replacement) {}
+#endif // DEBUG
+
private:
class Visitor;
DISALLOW_COPY_AND_ASSIGN(Verifier);
@@ -32,8 +56,8 @@
public:
static void Run(Schedule* schedule);
};
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_VERIFIER_H_
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
new file mode 100644
index 0000000..17065d6
--- /dev/null
+++ b/src/compiler/wasm-compiler.cc
@@ -0,0 +1,2031 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/wasm-compiler.h"
+
+#include "src/isolate-inl.h"
+
+#include "src/base/platform/platform.h"
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/change-lowering.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/source-position.h"
+#include "src/compiler/typer.h"
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+// TODO(titzer): pull WASM_64 up to a common header.
+#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
+#define WASM_64 1
+#else
+#define WASM_64 0
+#endif
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+const Operator* UnsupportedOpcode(wasm::WasmOpcode opcode) {
+ if (wasm::WasmOpcodes::IsSupported(opcode)) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Unsupported opcode #%d:%s reported as supported", opcode,
+ wasm::WasmOpcodes::OpcodeName(opcode));
+ }
+ V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", opcode,
+ wasm::WasmOpcodes::OpcodeName(opcode));
+ return nullptr;
+}
+
+
+void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
+ Graph* g = jsgraph->graph();
+ if (g->end()) {
+ NodeProperties::MergeControlToEnd(g, jsgraph->common(), node);
+ } else {
+ g->SetEnd(g->NewNode(jsgraph->common()->End(1), node));
+ }
+}
+
+
+enum TrapReason {
+ kTrapUnreachable,
+ kTrapMemOutOfBounds,
+ kTrapDivByZero,
+ kTrapDivUnrepresentable,
+ kTrapRemByZero,
+ kTrapFloatUnrepresentable,
+ kTrapFuncInvalid,
+ kTrapFuncSigMismatch,
+ kTrapCount
+};
+
+
+static const char* kTrapMessages[] = {
+ "unreachable", "memory access out of bounds",
+ "divide by zero", "divide result unrepresentable",
+ "remainder by zero", "integer result unrepresentable",
+ "invalid function", "function signature mismatch"};
+} // namespace
+
+
+// A helper that handles building graph fragments for trapping.
+// To avoid generating a ton of redundant code that just calls the runtime
+// to trap, we generate a per-trap-reason block of code that all trap sites
+// in this function will branch to.
+class WasmTrapHelper : public ZoneObject {
+ public:
+ explicit WasmTrapHelper(WasmGraphBuilder* builder)
+ : builder_(builder),
+ jsgraph_(builder->jsgraph()),
+ graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {
+ for (int i = 0; i < kTrapCount; i++) traps_[i] = nullptr;
+ }
+
+ // Make the current control path trap to unreachable.
+ void Unreachable() { ConnectTrap(kTrapUnreachable); }
+
+ // Add a check that traps if {node} is equal to {val}.
+ Node* TrapIfEq32(TrapReason reason, Node* node, int32_t val) {
+ Int32Matcher m(node);
+ if (m.HasValue() && !m.Is(val)) return graph()->start();
+ if (val == 0) {
+ AddTrapIfFalse(reason, node);
+ } else {
+ AddTrapIfTrue(reason,
+ graph()->NewNode(jsgraph()->machine()->Word32Equal(), node,
+ jsgraph()->Int32Constant(val)));
+ }
+ return builder_->Control();
+ }
+
+ // Add a check that traps if {node} is zero.
+ Node* ZeroCheck32(TrapReason reason, Node* node) {
+ return TrapIfEq32(reason, node, 0);
+ }
+
+ // Add a check that traps if {node} is equal to {val}.
+ Node* TrapIfEq64(TrapReason reason, Node* node, int64_t val) {
+ Int64Matcher m(node);
+ if (m.HasValue() && !m.Is(val)) return graph()->start();
+ AddTrapIfTrue(reason,
+ graph()->NewNode(jsgraph()->machine()->Word64Equal(), node,
+ jsgraph()->Int64Constant(val)));
+ return builder_->Control();
+ }
+
+ // Add a check that traps if {node} is zero.
+ Node* ZeroCheck64(TrapReason reason, Node* node) {
+ return TrapIfEq64(reason, node, 0);
+ }
+
+ // Add a trap if {cond} is true.
+ void AddTrapIfTrue(TrapReason reason, Node* cond) {
+ AddTrapIf(reason, cond, true);
+ }
+
+ // Add a trap if {cond} is false.
+ void AddTrapIfFalse(TrapReason reason, Node* cond) {
+ AddTrapIf(reason, cond, false);
+ }
+
+ // Add a trap if {cond} is true or false according to {iftrue}.
+ void AddTrapIf(TrapReason reason, Node* cond, bool iftrue) {
+ Node** effect_ptr = builder_->effect_;
+ Node** control_ptr = builder_->control_;
+ Node* before = *effect_ptr;
+ BranchHint hint = iftrue ? BranchHint::kFalse : BranchHint::kTrue;
+ Node* branch = graph()->NewNode(common()->Branch(hint), cond, *control_ptr);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
+    // Route the trapping arm into the shared per-reason trap block, then
+    // continue on the non-trapping arm with the original effect restored.
+    *control_ptr = iftrue ? if_true : if_false;
+    ConnectTrap(reason);
+    *control_ptr = iftrue ? if_false : if_true;
+    *effect_ptr = before;
+ }
+
+ private:
+ WasmGraphBuilder* builder_;
+ JSGraph* jsgraph_;
+ Graph* graph_;
+ Node* traps_[kTrapCount];
+ Node* effects_[kTrapCount];
+
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph() { return jsgraph_->graph(); }
+ CommonOperatorBuilder* common() { return jsgraph()->common(); }
+
+ void ConnectTrap(TrapReason reason) {
+ if (traps_[reason] == nullptr) {
+ // Create trap code for the first time this trap is used.
+ return BuildTrapCode(reason);
+ }
+ // Connect the current control and effect to the existing trap code.
+ builder_->AppendToMerge(traps_[reason], builder_->Control());
+ builder_->AppendToPhi(traps_[reason], effects_[reason], builder_->Effect());
+ }
+
+ void BuildTrapCode(TrapReason reason) {
+ Node* exception = builder_->String(kTrapMessages[reason]);
+ Node* end;
+ Node** control_ptr = builder_->control_;
+ Node** effect_ptr = builder_->effect_;
+ wasm::ModuleEnv* module = builder_->module_;
+ *control_ptr = traps_[reason] =
+ graph()->NewNode(common()->Merge(1), *control_ptr);
+ *effect_ptr = effects_[reason] =
+ graph()->NewNode(common()->EffectPhi(1), *effect_ptr, *control_ptr);
+
+ if (module && !module->context.is_null()) {
+ // Use the module context to call the runtime to throw an exception.
+ Runtime::FunctionId f = Runtime::kThrow;
+ const Runtime::Function* fun = Runtime::FunctionForId(f);
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
+ CallDescriptor::kNoFlags);
+ Node* inputs[] = {
+ jsgraph()->CEntryStubConstant(fun->result_size), // C entry
+ exception, // exception
+ jsgraph()->ExternalConstant(
+ ExternalReference(f, jsgraph()->isolate())), // ref
+ jsgraph()->Int32Constant(fun->nargs), // arity
+ jsgraph()->Constant(module->context), // context
+ *effect_ptr,
+ *control_ptr};
+
+ Node* node = graph()->NewNode(
+ common()->Call(desc), static_cast<int>(arraysize(inputs)), inputs);
+ *control_ptr = node;
+ *effect_ptr = node;
+ }
+    if (false) {
+      // End the control flow with a throw. This path is currently disabled
+      // in favor of returning a marker value below.
+ Node* thrw =
+ graph()->NewNode(common()->Throw(), jsgraph()->ZeroConstant(),
+ *effect_ptr, *control_ptr);
+ end = thrw;
+ } else {
+      // End the control flow by returning 0xdeadbeef.
+ Node* ret_value;
+ if (builder_->GetFunctionSignature()->return_count() > 0) {
+ switch (builder_->GetFunctionSignature()->GetReturn()) {
+ case wasm::kAstI32:
+ ret_value = jsgraph()->Int32Constant(0xdeadbeef);
+ break;
+ case wasm::kAstI64:
+ ret_value = jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
+ break;
+ case wasm::kAstF32:
+ ret_value = jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
+ break;
+ case wasm::kAstF64:
+ ret_value = jsgraph()->Float64Constant(
+ bit_cast<double>(0xdeadbeefdeadbeef));
+ break;
+ default:
+ UNREACHABLE();
+ ret_value = nullptr;
+ }
+ } else {
+ ret_value = jsgraph()->Int32Constant(0xdeadbeef);
+ }
+ end = graph()->NewNode(jsgraph()->common()->Return(), ret_value,
+ *effect_ptr, *control_ptr);
+ }
+
+ MergeControlToEnd(jsgraph(), end);
+ }
+};
+
+
+WasmGraphBuilder::WasmGraphBuilder(Zone* zone, JSGraph* jsgraph,
+ wasm::FunctionSig* function_signature)
+ : zone_(zone),
+ jsgraph_(jsgraph),
+ module_(nullptr),
+ mem_buffer_(nullptr),
+ mem_size_(nullptr),
+ function_table_(nullptr),
+ control_(nullptr),
+ effect_(nullptr),
+ cur_buffer_(def_buffer_),
+ cur_bufsize_(kDefaultBufferSize),
+ trap_(new (zone) WasmTrapHelper(this)),
+ function_signature_(function_signature) {
+ DCHECK_NOT_NULL(jsgraph_);
+}
+
+
+Node* WasmGraphBuilder::Error() { return jsgraph()->Dead(); }
+
+
+Node* WasmGraphBuilder::Start(unsigned params) {
+ Node* start = graph()->NewNode(jsgraph()->common()->Start(params));
+ graph()->SetStart(start);
+ return start;
+}
+
+
+Node* WasmGraphBuilder::Param(unsigned index, wasm::LocalType type) {
+ return graph()->NewNode(jsgraph()->common()->Parameter(index),
+ graph()->start());
+}
+
+
+Node* WasmGraphBuilder::Loop(Node* entry) {
+ return graph()->NewNode(jsgraph()->common()->Loop(1), entry);
+}
+
+
+Node* WasmGraphBuilder::Terminate(Node* effect, Node* control) {
+ Node* terminate =
+ graph()->NewNode(jsgraph()->common()->Terminate(), effect, control);
+ MergeControlToEnd(jsgraph(), terminate);
+ return terminate;
+}
+
+
+unsigned WasmGraphBuilder::InputCount(Node* node) {
+ return static_cast<unsigned>(node->InputCount());
+}
+
+
+bool WasmGraphBuilder::IsPhiWithMerge(Node* phi, Node* merge) {
+ return phi && IrOpcode::IsPhiOpcode(phi->opcode()) &&
+ NodeProperties::GetControlInput(phi) == merge;
+}
+
+
+void WasmGraphBuilder::AppendToMerge(Node* merge, Node* from) {
+ DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
+ merge->AppendInput(jsgraph()->zone(), from);
+ int new_size = merge->InputCount();
+ NodeProperties::ChangeOp(
+ merge, jsgraph()->common()->ResizeMergeOrPhi(merge->op(), new_size));
+}
+
+
+void WasmGraphBuilder::AppendToPhi(Node* merge, Node* phi, Node* from) {
+ DCHECK(IrOpcode::IsPhiOpcode(phi->opcode()));
+ DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
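+  // Insert {from} just before the trailing control input; the new value
+  // input count equals the old total input count.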
+ int new_size = phi->InputCount();
+ phi->InsertInput(jsgraph()->zone(), phi->InputCount() - 1, from);
+ NodeProperties::ChangeOp(
+ phi, jsgraph()->common()->ResizeMergeOrPhi(phi->op(), new_size));
+}
+
+
+Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
+ return graph()->NewNode(jsgraph()->common()->Merge(count), count, controls);
+}
+
+
+Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
+ Node* control) {
+ DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
+ Node** buf = Realloc(vals, count);
+ buf = Realloc(buf, count + 1);
+ buf[count] = control;
+ return graph()->NewNode(jsgraph()->common()->Phi(type, count), count + 1,
+ buf);
+}
+
+
+Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
+ Node* control) {
+ DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
+ Node** buf = Realloc(effects, count);
+ buf = Realloc(buf, count + 1);
+ buf[count] = control;
+ return graph()->NewNode(jsgraph()->common()->EffectPhi(count), count + 1,
+ buf);
+}
+
+
+Node* WasmGraphBuilder::Int32Constant(int32_t value) {
+ return jsgraph()->Int32Constant(value);
+}
+
+
+Node* WasmGraphBuilder::Int64Constant(int64_t value) {
+ return jsgraph()->Int64Constant(value);
+}
+
+
+Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
+ Node* right) {
+ const Operator* op;
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ switch (opcode) {
+ case wasm::kExprI32Add:
+ op = m->Int32Add();
+ break;
+ case wasm::kExprI32Sub:
+ op = m->Int32Sub();
+ break;
+ case wasm::kExprI32Mul:
+ op = m->Int32Mul();
+ break;
+ case wasm::kExprI32DivS: {
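+      // Explicitly check for divide by zero, then for the kMinInt / -1 case,
+      // whose result is unrepresentable and must trap.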
+ trap_->ZeroCheck32(kTrapDivByZero, right);
+ Node* before = *control_;
+ Node* denom_is_m1;
+ Node* denom_is_not_m1;
+ Branch(graph()->NewNode(jsgraph()->machine()->Word32Equal(), right,
+ jsgraph()->Int32Constant(-1)),
+ &denom_is_m1, &denom_is_not_m1);
+ *control_ = denom_is_m1;
+ trap_->TrapIfEq32(kTrapDivUnrepresentable, left, kMinInt);
+ if (*control_ != denom_is_m1) {
+ *control_ = graph()->NewNode(jsgraph()->common()->Merge(2),
+ denom_is_not_m1, *control_);
+ } else {
+ *control_ = before;
+ }
+ return graph()->NewNode(m->Int32Div(), left, right, *control_);
+ }
+ case wasm::kExprI32DivU:
+ op = m->Uint32Div();
+ return graph()->NewNode(op, left, right,
+ trap_->ZeroCheck32(kTrapDivByZero, right));
+ case wasm::kExprI32RemS: {
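+      // Check for zero, then special-case a divisor of -1: kMinInt % -1 is 0,
+      // but the Int32Mod instruction would fault on some architectures.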
+ trap_->ZeroCheck32(kTrapRemByZero, right);
+ Diamond d(graph(), jsgraph()->common(),
+ graph()->NewNode(jsgraph()->machine()->Word32Equal(), right,
+ jsgraph()->Int32Constant(-1)));
+
+ Node* rem = graph()->NewNode(m->Int32Mod(), left, right, d.if_false);
+
+ return d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ rem);
+ }
+ case wasm::kExprI32RemU:
+ op = m->Uint32Mod();
+ return graph()->NewNode(op, left, right,
+ trap_->ZeroCheck32(kTrapRemByZero, right));
+ case wasm::kExprI32And:
+ op = m->Word32And();
+ break;
+ case wasm::kExprI32Ior:
+ op = m->Word32Or();
+ break;
+ case wasm::kExprI32Xor:
+ op = m->Word32Xor();
+ break;
+ case wasm::kExprI32Shl:
+ op = m->Word32Shl();
+ break;
+ case wasm::kExprI32ShrU:
+ op = m->Word32Shr();
+ break;
+ case wasm::kExprI32ShrS:
+ op = m->Word32Sar();
+ break;
+ case wasm::kExprI32Eq:
+ op = m->Word32Equal();
+ break;
+ case wasm::kExprI32Ne:
+ return Invert(Binop(wasm::kExprI32Eq, left, right));
+ case wasm::kExprI32LtS:
+ op = m->Int32LessThan();
+ break;
+ case wasm::kExprI32LeS:
+ op = m->Int32LessThanOrEqual();
+ break;
+ case wasm::kExprI32LtU:
+ op = m->Uint32LessThan();
+ break;
+ case wasm::kExprI32LeU:
+ op = m->Uint32LessThanOrEqual();
+ break;
+ case wasm::kExprI32GtS:
+ op = m->Int32LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI32GeS:
+ op = m->Int32LessThanOrEqual();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI32GtU:
+ op = m->Uint32LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI32GeU:
+ op = m->Uint32LessThanOrEqual();
+ std::swap(left, right);
+ break;
+#if WASM_64
+ // Opcodes only supported on 64-bit platforms.
+ // TODO(titzer): query the machine operator builder here instead of #ifdef.
+ case wasm::kExprI64Add:
+ op = m->Int64Add();
+ break;
+ case wasm::kExprI64Sub:
+ op = m->Int64Sub();
+ break;
+ case wasm::kExprI64Mul:
+ op = m->Int64Mul();
+ break;
+ case wasm::kExprI64DivS: {
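+      // Explicitly check for divide by zero, then for the INT64_MIN / -1
+      // case, whose result is unrepresentable and must trap.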
+ trap_->ZeroCheck64(kTrapDivByZero, right);
+ Node* before = *control_;
+ Node* denom_is_m1;
+ Node* denom_is_not_m1;
+ Branch(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+ jsgraph()->Int64Constant(-1)),
+ &denom_is_m1, &denom_is_not_m1);
+ *control_ = denom_is_m1;
+ trap_->TrapIfEq64(kTrapDivUnrepresentable, left,
+ std::numeric_limits<int64_t>::min());
+ if (*control_ != denom_is_m1) {
+ *control_ = graph()->NewNode(jsgraph()->common()->Merge(2),
+ denom_is_not_m1, *control_);
+ } else {
+ *control_ = before;
+ }
+ return graph()->NewNode(m->Int64Div(), left, right, *control_);
+ }
+ case wasm::kExprI64DivU:
+ op = m->Uint64Div();
+ return graph()->NewNode(op, left, right,
+ trap_->ZeroCheck64(kTrapDivByZero, right));
+ case wasm::kExprI64RemS: {
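+      // Check for zero, then special-case a divisor of -1: INT64_MIN % -1 is
+      // 0, but the Int64Mod instruction would fault on some architectures.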
+ trap_->ZeroCheck64(kTrapRemByZero, right);
+ Diamond d(jsgraph()->graph(), jsgraph()->common(),
+ graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+ jsgraph()->Int64Constant(-1)));
+
+ Node* rem = graph()->NewNode(m->Int64Mod(), left, right, d.if_false);
+
+ return d.Phi(MachineRepresentation::kWord64, jsgraph()->Int64Constant(0),
+ rem);
+ }
+ case wasm::kExprI64RemU:
+ op = m->Uint64Mod();
+ return graph()->NewNode(op, left, right,
+ trap_->ZeroCheck64(kTrapRemByZero, right));
+ case wasm::kExprI64And:
+ op = m->Word64And();
+ break;
+ case wasm::kExprI64Ior:
+ op = m->Word64Or();
+ break;
+ case wasm::kExprI64Xor:
+ op = m->Word64Xor();
+ break;
+ case wasm::kExprI64Shl:
+ op = m->Word64Shl();
+ break;
+ case wasm::kExprI64ShrU:
+ op = m->Word64Shr();
+ break;
+ case wasm::kExprI64ShrS:
+ op = m->Word64Sar();
+ break;
+ case wasm::kExprI64Eq:
+ op = m->Word64Equal();
+ break;
+ case wasm::kExprI64Ne:
+ return Invert(Binop(wasm::kExprI64Eq, left, right));
+ case wasm::kExprI64LtS:
+ op = m->Int64LessThan();
+ break;
+ case wasm::kExprI64LeS:
+ op = m->Int64LessThanOrEqual();
+ break;
+ case wasm::kExprI64LtU:
+ op = m->Uint64LessThan();
+ break;
+ case wasm::kExprI64LeU:
+ op = m->Uint64LessThanOrEqual();
+ break;
+ case wasm::kExprI64GtS:
+ op = m->Int64LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI64GeS:
+ op = m->Int64LessThanOrEqual();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI64GtU:
+ op = m->Uint64LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI64GeU:
+ op = m->Uint64LessThanOrEqual();
+ std::swap(left, right);
+ break;
+#endif
+
+ case wasm::kExprF32CopySign:
+ return BuildF32CopySign(left, right);
+ case wasm::kExprF64CopySign:
+ return BuildF64CopySign(left, right);
+ case wasm::kExprF32Add:
+ op = m->Float32Add();
+ break;
+ case wasm::kExprF32Sub:
+ op = m->Float32Sub();
+ break;
+ case wasm::kExprF32Mul:
+ op = m->Float32Mul();
+ break;
+ case wasm::kExprF32Div:
+ op = m->Float32Div();
+ break;
+ case wasm::kExprF32Eq:
+ op = m->Float32Equal();
+ break;
+ case wasm::kExprF32Ne:
+ return Invert(Binop(wasm::kExprF32Eq, left, right));
+ case wasm::kExprF32Lt:
+ op = m->Float32LessThan();
+ break;
+ case wasm::kExprF32Ge:
+ op = m->Float32LessThanOrEqual();
+ std::swap(left, right);
+ break;
+ case wasm::kExprF32Gt:
+ op = m->Float32LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprF32Le:
+ op = m->Float32LessThanOrEqual();
+ break;
+ case wasm::kExprF64Add:
+ op = m->Float64Add();
+ break;
+ case wasm::kExprF64Sub:
+ op = m->Float64Sub();
+ break;
+ case wasm::kExprF64Mul:
+ op = m->Float64Mul();
+ break;
+ case wasm::kExprF64Div:
+ op = m->Float64Div();
+ break;
+ case wasm::kExprF64Eq:
+ op = m->Float64Equal();
+ break;
+ case wasm::kExprF64Ne:
+ return Invert(Binop(wasm::kExprF64Eq, left, right));
+ case wasm::kExprF64Lt:
+ op = m->Float64LessThan();
+ break;
+ case wasm::kExprF64Le:
+ op = m->Float64LessThanOrEqual();
+ break;
+ case wasm::kExprF64Gt:
+ op = m->Float64LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprF64Ge:
+ op = m->Float64LessThanOrEqual();
+ std::swap(left, right);
+ break;
+ case wasm::kExprF32Min:
+ return BuildF32Min(left, right);
+ case wasm::kExprF64Min:
+ return BuildF64Min(left, right);
+ case wasm::kExprF32Max:
+ return BuildF32Max(left, right);
+ case wasm::kExprF64Max:
+ return BuildF64Max(left, right);
+ default:
+ op = UnsupportedOpcode(opcode);
+ }
+ return graph()->NewNode(op, left, right);
+}
+
+
+Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
+ const Operator* op;
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ switch (opcode) {
+ case wasm::kExprBoolNot:
+ op = m->Word32Equal();
+ return graph()->NewNode(op, input, jsgraph()->Int32Constant(0));
+ case wasm::kExprF32Abs:
+ op = m->Float32Abs();
+ break;
+ case wasm::kExprF32Neg:
+ return BuildF32Neg(input);
+ case wasm::kExprF32Sqrt:
+ op = m->Float32Sqrt();
+ break;
+ case wasm::kExprF64Abs:
+ op = m->Float64Abs();
+ break;
+ case wasm::kExprF64Neg:
+ return BuildF64Neg(input);
+ case wasm::kExprF64Sqrt:
+ op = m->Float64Sqrt();
+ break;
+ case wasm::kExprI32SConvertF64:
+ return BuildI32SConvertF64(input);
+ case wasm::kExprI32UConvertF64:
+ return BuildI32UConvertF64(input);
+ case wasm::kExprF32ConvertF64:
+ op = m->TruncateFloat64ToFloat32();
+ break;
+ case wasm::kExprF64SConvertI32:
+ op = m->ChangeInt32ToFloat64();
+ break;
+ case wasm::kExprF64UConvertI32:
+ op = m->ChangeUint32ToFloat64();
+ break;
+ case wasm::kExprF32SConvertI32:
+ op = m->ChangeInt32ToFloat64(); // TODO(titzer): two conversions
+ input = graph()->NewNode(op, input);
+ op = m->TruncateFloat64ToFloat32();
+ break;
+ case wasm::kExprF32UConvertI32:
+ op = m->ChangeUint32ToFloat64();
+ input = graph()->NewNode(op, input);
+ op = m->TruncateFloat64ToFloat32();
+ break;
+ case wasm::kExprI32SConvertF32:
+ return BuildI32SConvertF32(input);
+ case wasm::kExprI32UConvertF32:
+ return BuildI32UConvertF32(input);
+ case wasm::kExprF64ConvertF32:
+ op = m->ChangeFloat32ToFloat64();
+ break;
+ case wasm::kExprF32ReinterpretI32:
+ op = m->BitcastInt32ToFloat32();
+ break;
+ case wasm::kExprI32ReinterpretF32:
+ op = m->BitcastFloat32ToInt32();
+ break;
+ case wasm::kExprI32Clz:
+ op = m->Word32Clz();
+ break;
+ case wasm::kExprI32Ctz: {
+ if (m->Word32Ctz().IsSupported()) {
+ op = m->Word32Ctz().op();
+ break;
+ } else {
+ return BuildI32Ctz(input);
+ }
+ }
+ case wasm::kExprI32Popcnt: {
+ if (m->Word32Popcnt().IsSupported()) {
+ op = m->Word32Popcnt().op();
+ break;
+ } else {
+ return BuildI32Popcnt(input);
+ }
+ }
+ case wasm::kExprF32Floor: {
+ if (m->Float32RoundDown().IsSupported()) {
+ op = m->Float32RoundDown().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF32Ceil: {
+ if (m->Float32RoundUp().IsSupported()) {
+ op = m->Float32RoundUp().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF32Trunc: {
+ if (m->Float32RoundTruncate().IsSupported()) {
+ op = m->Float32RoundTruncate().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF32NearestInt: {
+ if (m->Float32RoundTiesEven().IsSupported()) {
+ op = m->Float32RoundTiesEven().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF64Floor: {
+ if (m->Float64RoundDown().IsSupported()) {
+ op = m->Float64RoundDown().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF64Ceil: {
+ if (m->Float64RoundUp().IsSupported()) {
+ op = m->Float64RoundUp().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF64Trunc: {
+ if (m->Float64RoundTruncate().IsSupported()) {
+ op = m->Float64RoundTruncate().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF64NearestInt: {
+ if (m->Float64RoundTiesEven().IsSupported()) {
+ op = m->Float64RoundTiesEven().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+
+#if WASM_64
+ // Opcodes only supported on 64-bit platforms.
+ // TODO(titzer): query the machine operator builder here instead of #ifdef.
+ case wasm::kExprI32ConvertI64:
+ op = m->TruncateInt64ToInt32();
+ break;
+ case wasm::kExprI64SConvertI32:
+ op = m->ChangeInt32ToInt64();
+ break;
+ case wasm::kExprI64UConvertI32:
+ op = m->ChangeUint32ToUint64();
+ break;
+ case wasm::kExprF32SConvertI64:
+ op = m->RoundInt64ToFloat32();
+ break;
+ case wasm::kExprF32UConvertI64:
+ op = m->RoundUint64ToFloat32();
+ break;
+ case wasm::kExprF64SConvertI64:
+ op = m->RoundInt64ToFloat64();
+ break;
+ case wasm::kExprF64UConvertI64:
+ op = m->RoundUint64ToFloat64();
+ break;
+ case wasm::kExprI64SConvertF32: {
+ Node* trunc = graph()->NewNode(m->TryTruncateFloat32ToInt64(), input);
+ Node* result =
+ graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+ Node* overflow =
+ graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
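+      // Projection(1) is nonzero iff the truncation succeeded; trap on zero.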
+ trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
+ return result;
+ }
+ case wasm::kExprI64SConvertF64: {
+ Node* trunc = graph()->NewNode(m->TryTruncateFloat64ToInt64(), input);
+ Node* result =
+ graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+ Node* overflow =
+ graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
+ return result;
+ }
+ case wasm::kExprI64UConvertF32: {
+ Node* trunc = graph()->NewNode(m->TryTruncateFloat32ToUint64(), input);
+ Node* result =
+ graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+ Node* overflow =
+ graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
+ return result;
+ }
+ case wasm::kExprI64UConvertF64: {
+ Node* trunc = graph()->NewNode(m->TryTruncateFloat64ToUint64(), input);
+ Node* result =
+ graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+ Node* overflow =
+ graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
+ return result;
+ }
+ case wasm::kExprF64ReinterpretI64:
+ op = m->BitcastInt64ToFloat64();
+ break;
+ case wasm::kExprI64ReinterpretF64:
+ op = m->BitcastFloat64ToInt64();
+ break;
+ case wasm::kExprI64Clz:
+ op = m->Word64Clz();
+ break;
+ case wasm::kExprI64Ctz: {
+ if (m->Word64Ctz().IsSupported()) {
+ op = m->Word64Ctz().op();
+ break;
+ } else {
+ return BuildI64Ctz(input);
+ }
+ }
+ case wasm::kExprI64Popcnt: {
+ if (m->Word64Popcnt().IsSupported()) {
+ op = m->Word64Popcnt().op();
+ break;
+ } else {
+ return BuildI64Popcnt(input);
+ }
+ }
+#endif
+ default:
+ op = UnsupportedOpcode(opcode);
+ }
+ return graph()->NewNode(op, input);
+}
+
+
+Node* WasmGraphBuilder::Float32Constant(float value) {
+ return jsgraph()->Float32Constant(value);
+}
+
+
+Node* WasmGraphBuilder::Float64Constant(double value) {
+ return jsgraph()->Float64Constant(value);
+}
+
+
+Node* WasmGraphBuilder::Constant(Handle<Object> value) {
+ return jsgraph()->Constant(value);
+}
+
+
+Node* WasmGraphBuilder::Branch(Node* cond, Node** true_node,
+ Node** false_node) {
+ DCHECK_NOT_NULL(cond);
+ DCHECK_NOT_NULL(*control_);
+ Node* branch =
+ graph()->NewNode(jsgraph()->common()->Branch(), cond, *control_);
+ *true_node = graph()->NewNode(jsgraph()->common()->IfTrue(), branch);
+ *false_node = graph()->NewNode(jsgraph()->common()->IfFalse(), branch);
+ return branch;
+}
+
+
+Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
+ return graph()->NewNode(jsgraph()->common()->Switch(count), key, *control_);
+}
+
+
+Node* WasmGraphBuilder::IfValue(int32_t value, Node* sw) {
+ DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
+ return graph()->NewNode(jsgraph()->common()->IfValue(value), sw);
+}
+
+
+Node* WasmGraphBuilder::IfDefault(Node* sw) {
+ DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
+ return graph()->NewNode(jsgraph()->common()->IfDefault(), sw);
+}
+
+
+Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
+ DCHECK_NOT_NULL(*control_);
+ DCHECK_NOT_NULL(*effect_);
+
+ if (count == 0) {
+ // Handle a return of void.
+ vals[0] = jsgraph()->Int32Constant(0);
+ count = 1;
+ }
+
+ Node** buf = Realloc(vals, count);
+ buf = Realloc(buf, count + 2);
+ buf[count] = *effect_;
+ buf[count + 1] = *control_;
+  Node* ret = graph()->NewNode(jsgraph()->common()->Return(), count + 2, buf);
+
+ MergeControlToEnd(jsgraph(), ret);
+ return ret;
+}
+
+
+Node* WasmGraphBuilder::ReturnVoid() { return Return(0, Buffer(0)); }
+
+
+Node* WasmGraphBuilder::Unreachable() {
+ trap_->Unreachable();
+ return nullptr;
+}
+
+
+Node* WasmGraphBuilder::BuildF32Neg(Node* input) {
+ Node* result =
+ Unop(wasm::kExprF32ReinterpretI32,
+ Binop(wasm::kExprI32Xor, Unop(wasm::kExprI32ReinterpretF32, input),
+ jsgraph()->Int32Constant(0x80000000)));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildF64Neg(Node* input) {
+#if WASM_64
+ Node* result =
+ Unop(wasm::kExprF64ReinterpretI64,
+ Binop(wasm::kExprI64Xor, Unop(wasm::kExprI64ReinterpretF64, input),
+ jsgraph()->Int64Constant(0x8000000000000000)));
+
+ return result;
+#else
+ MachineOperatorBuilder* m = jsgraph()->machine();
+
+ Node* old_high_word = graph()->NewNode(m->Float64ExtractHighWord32(), input);
+ Node* new_high_word = Binop(wasm::kExprI32Xor, old_high_word,
+ jsgraph()->Int32Constant(0x80000000));
+
+ return graph()->NewNode(m->Float64InsertHighWord32(), input, new_high_word);
+#endif
+}
+
+
+Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
+ Node* result = Unop(
+ wasm::kExprF32ReinterpretI32,
+ Binop(wasm::kExprI32Ior,
+ Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, left),
+ jsgraph()->Int32Constant(0x7fffffff)),
+ Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, right),
+ jsgraph()->Int32Constant(0x80000000))));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
+#if WASM_64
+ Node* result = Unop(
+ wasm::kExprF64ReinterpretI64,
+ Binop(wasm::kExprI64Ior,
+ Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, left),
+ jsgraph()->Int64Constant(0x7fffffffffffffff)),
+ Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, right),
+ jsgraph()->Int64Constant(0x8000000000000000))));
+
+ return result;
+#else
+ MachineOperatorBuilder* m = jsgraph()->machine();
+
+ Node* high_word_left = graph()->NewNode(m->Float64ExtractHighWord32(), left);
+ Node* high_word_right =
+ graph()->NewNode(m->Float64ExtractHighWord32(), right);
+
+ Node* new_high_word =
+ Binop(wasm::kExprI32Ior, Binop(wasm::kExprI32And, high_word_left,
+ jsgraph()->Int32Constant(0x7fffffff)),
+ Binop(wasm::kExprI32And, high_word_right,
+ jsgraph()->Int32Constant(0x80000000)));
+
+ return graph()->NewNode(m->Float64InsertHighWord32(), left, new_high_word);
+#endif
+}
+
+
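+// The Min/Max builders below lower to nested diamonds: take {left} when the
+// ordered comparison holds, {right} when the reversed comparison holds, and
+// otherwise propagate whichever operand is NaN.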
+Node* WasmGraphBuilder::BuildF32Min(Node* left, Node* right) {
+ Diamond left_le_right(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Le, left, right));
+
+ Diamond right_lt_left(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Lt, right, left));
+
+ Diamond left_is_not_nan(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Eq, left, left));
+
+ return left_le_right.Phi(
+ wasm::kAstF32, left,
+ right_lt_left.Phi(wasm::kAstF32, right,
+ left_is_not_nan.Phi(wasm::kAstF32, right, left)));
+}
+
+
+Node* WasmGraphBuilder::BuildF32Max(Node* left, Node* right) {
+ Diamond left_ge_right(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Ge, left, right));
+
+ Diamond right_gt_left(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Gt, right, left));
+
+ Diamond left_is_not_nan(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Eq, left, left));
+
+ return left_ge_right.Phi(
+ wasm::kAstF32, left,
+ right_gt_left.Phi(wasm::kAstF32, right,
+ left_is_not_nan.Phi(wasm::kAstF32, right, left)));
+}
+
+
+Node* WasmGraphBuilder::BuildF64Min(Node* left, Node* right) {
+ Diamond left_le_right(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Le, left, right));
+
+ Diamond right_lt_left(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Lt, right, left));
+
+ Diamond left_is_not_nan(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Eq, left, left));
+
+ return left_le_right.Phi(
+ wasm::kAstF64, left,
+ right_lt_left.Phi(wasm::kAstF64, right,
+ left_is_not_nan.Phi(wasm::kAstF64, right, left)));
+}
+
+
+Node* WasmGraphBuilder::BuildF64Max(Node* left, Node* right) {
+ Diamond left_ge_right(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Ge, left, right));
+
+ Diamond right_gt_left(graph(), jsgraph()->common(),
+                        Binop(wasm::kExprF64Gt, right, left));
+
+ Diamond left_is_not_nan(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Eq, left, left));
+
+ return left_ge_right.Phi(
+ wasm::kAstF64, left,
+ right_gt_left.Phi(wasm::kAstF64, right,
+ left_is_not_nan.Phi(wasm::kAstF64, right, left)));
+}
+
+
+Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // Truncation of the input value is needed for the overflow check later.
+ Node* trunc = Unop(wasm::kExprF32Trunc, input);
+ // TODO(titzer): two conversions
+ Node* f64_trunc = graph()->NewNode(m->ChangeFloat32ToFloat64(), trunc);
+ Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), f64_trunc);
+
+ // Convert the result back to f64. If we end up at a different value than the
+ // truncated input value, then there has been an overflow and we trap.
+ Node* check = Unop(wasm::kExprF64SConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF64Ne, f64_trunc, check);
+ trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // Truncation of the input value is needed for the overflow check later.
+ Node* trunc = Unop(wasm::kExprF64Trunc, input);
+ Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), trunc);
+
+ // Convert the result back to f64. If we end up at a different value than the
+ // truncated input value, then there has been an overflow and we trap.
+ Node* check = Unop(wasm::kExprF64SConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
+ trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // Truncation of the input value is needed for the overflow check later.
+ Node* trunc = Unop(wasm::kExprF32Trunc, input);
+ // TODO(titzer): two conversions
+ Node* f64_trunc = graph()->NewNode(m->ChangeFloat32ToFloat64(), trunc);
+ Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), f64_trunc);
+
+ // Convert the result back to f64. If we end up at a different value than the
+ // truncated input value, then there has been an overflow and we trap.
+ Node* check = Unop(wasm::kExprF64UConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF64Ne, f64_trunc, check);
+ trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // Truncation of the input value is needed for the overflow check later.
+ Node* trunc = Unop(wasm::kExprF64Trunc, input);
+ Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), trunc);
+
+ // Convert the result back to f64. If we end up at a different value than the
+ // truncated input value, then there has been an overflow and we trap.
+ Node* check = Unop(wasm::kExprF64UConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
+ trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32Ctz(Node* input) {
+  //// Implement the following code as a TF graph.
+ // value = value | (value << 1);
+ // value = value | (value << 2);
+ // value = value | (value << 4);
+ // value = value | (value << 8);
+ // value = value | (value << 16);
+ // return CountPopulation32(0xffffffff XOR value);
+
+ Node* result =
+ Binop(wasm::kExprI32Ior, input,
+ Binop(wasm::kExprI32Shl, input, jsgraph()->Int32Constant(1)));
+
+ result = Binop(wasm::kExprI32Ior, result,
+ Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(2)));
+
+ result = Binop(wasm::kExprI32Ior, result,
+ Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(4)));
+
+ result = Binop(wasm::kExprI32Ior, result,
+ Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(8)));
+
+ result =
+ Binop(wasm::kExprI32Ior, result,
+ Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(16)));
+
+ result = BuildI32Popcnt(
+ Binop(wasm::kExprI32Xor, jsgraph()->Int32Constant(0xffffffff), result));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI64Ctz(Node* input) {
+  //// Implement the following code as a TF graph.
+ // value = value | (value << 1);
+ // value = value | (value << 2);
+ // value = value | (value << 4);
+ // value = value | (value << 8);
+ // value = value | (value << 16);
+ // value = value | (value << 32);
+ // return CountPopulation64(0xffffffffffffffff XOR value);
+
+ Node* result =
+ Binop(wasm::kExprI64Ior, input,
+ Binop(wasm::kExprI64Shl, input, jsgraph()->Int64Constant(1)));
+
+ result = Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(2)));
+
+ result = Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(4)));
+
+ result = Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(8)));
+
+ result =
+ Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(16)));
+
+ result =
+ Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(32)));
+
+ result = BuildI64Popcnt(Binop(
+ wasm::kExprI64Xor, jsgraph()->Int64Constant(0xffffffffffffffff), result));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32Popcnt(Node* input) {
+ //// Implement the following code as a TF graph.
+ // value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
+ // value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
+ // value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
+ // value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
+ // value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
+
+ Node* result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, input, jsgraph()->Int32Constant(1)),
+ jsgraph()->Int32Constant(0x55555555)),
+ Binop(wasm::kExprI32And, input, jsgraph()->Int32Constant(0x55555555)));
+
+ result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(2)),
+ jsgraph()->Int32Constant(0x33333333)),
+ Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x33333333)));
+
+ result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(4)),
+ jsgraph()->Int32Constant(0x0f0f0f0f)),
+ Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x0f0f0f0f)));
+
+ result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(8)),
+ jsgraph()->Int32Constant(0x00ff00ff)),
+ Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x00ff00ff)));
+
+ result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(16)),
+ jsgraph()->Int32Constant(0x0000ffff)),
+ Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x0000ffff)));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI64Popcnt(Node* input) {
+ //// Implement the following code as a TF graph.
+ // value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
+ // value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
+ // value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
+ // value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
+ // value = ((value >> 16) & 0x0000ffff0000ffff) + (value &
+ // 0x0000ffff0000ffff);
+ // value = ((value >> 32) & 0x00000000ffffffff) + (value &
+ // 0x00000000ffffffff);
+
+ Node* result =
+ Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And,
+ Binop(wasm::kExprI64ShrU, input, jsgraph()->Int64Constant(1)),
+ jsgraph()->Int64Constant(0x5555555555555555)),
+ Binop(wasm::kExprI64And, input,
+ jsgraph()->Int64Constant(0x5555555555555555)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(2)),
+ jsgraph()->Int64Constant(0x3333333333333333)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x3333333333333333)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(4)),
+ jsgraph()->Int64Constant(0x0f0f0f0f0f0f0f0f)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x0f0f0f0f0f0f0f0f)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(8)),
+ jsgraph()->Int64Constant(0x00ff00ff00ff00ff)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x00ff00ff00ff00ff)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(16)),
+ jsgraph()->Int64Constant(0x0000ffff0000ffff)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x0000ffff0000ffff)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(32)),
+ jsgraph()->Int64Constant(0x00000000ffffffff)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x00000000ffffffff)));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
+ const size_t params = sig->parameter_count();
+ const size_t extra = 2; // effect and control inputs.
+ const size_t count = 1 + params + extra;
+
+ // Reallocate the buffer to make space for extra inputs.
+ args = Realloc(args, count);
+
+ // Add effect and control inputs.
+ args[params + 1] = *effect_;
+ args[params + 2] = *control_;
+
+ const Operator* op = jsgraph()->common()->Call(
+ module_->GetWasmCallDescriptor(jsgraph()->zone(), sig));
+ Node* call = graph()->NewNode(op, static_cast<int>(count), args);
+
+ *effect_ = call;
+ return call;
+}
+
+
+Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args) {
+ DCHECK_NULL(args[0]);
+
+ // Add code object as constant.
+ args[0] = Constant(module_->GetFunctionCode(index));
+ wasm::FunctionSig* sig = module_->GetFunctionSignature(index);
+
+ return BuildWasmCall(sig, args);
+}
+
+
+Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
+ DCHECK_NOT_NULL(args[0]);
+
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+
+ // Compute the code object by loading it from the function table.
+ Node* key = args[0];
+ Node* table = FunctionTable();
+
+ // Bounds check the index.
+ int table_size = static_cast<int>(module_->FunctionTableSize());
+ {
+ Node* size = Int32Constant(static_cast<int>(table_size));
+ Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
+ trap_->AddTrapIfFalse(kTrapFuncInvalid, in_bounds);
+ }
+
+ // Load signature from the table and check.
+ // The table is a FixedArray; signatures are encoded as SMIs.
+ // [sig1, sig2, sig3, ...., code1, code2, code3 ...]
+ ElementAccess access = AccessBuilder::ForFixedArrayElement();
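+  // Offset of element 0 from the tagged FixedArray pointer.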
+ const int fixed_offset = access.header_size - access.tag();
+ {
+ Node* load_sig = graph()->NewNode(
+ machine->Load(MachineType::AnyTagged()), table,
+ graph()->NewNode(machine->Int32Add(),
+ graph()->NewNode(machine->Word32Shl(), key,
+ Int32Constant(kPointerSizeLog2)),
+ Int32Constant(fixed_offset)),
+ *effect_, *control_);
+ Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
+ jsgraph()->SmiConstant(index));
+ trap_->AddTrapIfFalse(kTrapFuncSigMismatch, sig_match);
+ }
+
+ // Load code object from the table.
+ int offset = fixed_offset + kPointerSize * table_size;
+ Node* load_code = graph()->NewNode(
+ machine->Load(MachineType::AnyTagged()), table,
+ graph()->NewNode(machine->Int32Add(),
+ graph()->NewNode(machine->Word32Shl(), key,
+ Int32Constant(kPointerSizeLog2)),
+ Int32Constant(offset)),
+ *effect_, *control_);
+
+ args[0] = load_code;
+ wasm::FunctionSig* sig = module_->GetSignature(index);
+ return BuildWasmCall(sig, args);
+}
+
+
+Node* WasmGraphBuilder::ToJS(Node* node, Node* context, wasm::LocalType type) {
+ SimplifiedOperatorBuilder simplified(jsgraph()->zone());
+ switch (type) {
+ case wasm::kAstI32:
+ return graph()->NewNode(simplified.ChangeInt32ToTagged(), node);
+ case wasm::kAstI64:
+ // TODO(titzer): i64->JS has no good solution right now. Using lower 32
+ // bits.
+ node =
+ graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(), node);
+ return graph()->NewNode(simplified.ChangeInt32ToTagged(), node);
+ case wasm::kAstF32:
+ node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
+ node);
+ return graph()->NewNode(simplified.ChangeFloat64ToTagged(), node);
+ case wasm::kAstF64:
+ return graph()->NewNode(simplified.ChangeFloat64ToTagged(), node);
+ case wasm::kAstStmt:
+ return jsgraph()->UndefinedConstant();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
+ wasm::LocalType type) {
+ // Do a JavaScript ToNumber.
+ Node* num =
+ graph()->NewNode(jsgraph()->javascript()->ToNumber(), node, context,
+ jsgraph()->EmptyFrameState(), *effect_, *control_);
+ *control_ = num;
+ *effect_ = num;
+
+ // Change representation.
+ SimplifiedOperatorBuilder simplified(jsgraph()->zone());
+ num = graph()->NewNode(simplified.ChangeTaggedToFloat64(), num);
+
+ switch (type) {
+ case wasm::kAstI32: {
+ num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToInt32(
+ TruncationMode::kJavaScript),
+ num);
+ break;
+ }
+ case wasm::kAstI64:
+ // TODO(titzer): JS->i64 has no good solution right now. Using 32 bits.
+ num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToInt32(
+ TruncationMode::kJavaScript),
+ num);
+ num = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), num);
+ break;
+ case wasm::kAstF32:
+ num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
+ num);
+ break;
+ case wasm::kAstF64:
+ break;
+ case wasm::kAstStmt:
+ num = jsgraph()->Int32Constant(0);
+ break;
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+ return num;
+}
+
+
+Node* WasmGraphBuilder::Invert(Node* node) {
+ return Unop(wasm::kExprBoolNot, node);
+}
+
+
+void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
+ wasm::FunctionSig* sig) {
+ int params = static_cast<int>(sig->parameter_count());
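+  // One slot for the wasm code object, plus effect and control inputs.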
+ int count = params + 3;
+ Node** args = Buffer(count);
+
+ // Build the start and the JS parameter nodes.
+ Node* start = Start(params + 3);
+ *control_ = start;
+ *effect_ = start;
+ // JS context is the last parameter.
+ Node* context = graph()->NewNode(
+ jsgraph()->common()->Parameter(params + 1, "context"), start);
+
+ int pos = 0;
+ args[pos++] = Constant(wasm_code);
+
+ // Convert JS parameters to WASM numbers.
+ for (int i = 0; i < params; i++) {
+ Node* param = graph()->NewNode(jsgraph()->common()->Parameter(i), start);
+ args[pos++] = FromJS(param, context, sig->GetParam(i));
+ }
+
+ args[pos++] = *effect_;
+ args[pos++] = *control_;
+
+ // Call the WASM code.
+ CallDescriptor* desc = module_->GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
+ Node* jsval =
+ ToJS(call, context,
+ sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
+ Node* ret =
+ graph()->NewNode(jsgraph()->common()->Return(), jsval, call, start);
+
+ MergeControlToEnd(jsgraph(), ret);
+}
+
+
+void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSFunction> function,
+ wasm::FunctionSig* sig) {
+ int js_count = function->shared()->internal_formal_parameter_count();
+ int wasm_count = static_cast<int>(sig->parameter_count());
+
+ // Build the start and the parameter nodes.
+ Isolate* isolate = jsgraph()->isolate();
+ CallDescriptor* desc;
+ Node* start = Start(wasm_count + 3);
+ *effect_ = start;
+ *control_ = start;
+ // JS context is the last parameter.
+ Node* context = Constant(Handle<Context>(function->context(), isolate));
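+  // Both call paths below need at most wasm_count + 7 input slots: target,
+  // function, argument count, receiver, arguments, new.target, context,
+  // effect and control.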
+ Node** args = Buffer(wasm_count + 7);
+
+ bool arg_count_before_args = false;
+ bool add_new_target_undefined = false;
+
+ int pos = 0;
+ if (js_count == wasm_count) {
+    // Exact arity match; call the function directly.
+ desc = Linkage::GetJSCallDescriptor(graph()->zone(), false, wasm_count + 1,
+ CallDescriptor::kNoFlags);
+ arg_count_before_args = false;
+ add_new_target_undefined = true;
+ } else {
+ // Use the Call builtin.
+ Callable callable = CodeFactory::Call(isolate);
+ args[pos++] = jsgraph()->HeapConstant(callable.code());
+ desc = Linkage::GetStubCallDescriptor(isolate, graph()->zone(),
+ callable.descriptor(), wasm_count + 1,
+ CallDescriptor::kNoFlags);
+ arg_count_before_args = true;
+ }
+
+ args[pos++] = jsgraph()->Constant(function); // JS function.
+ if (arg_count_before_args) {
+ args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
+ }
+ // JS receiver.
+ Handle<Object> global(function->context()->global_object(), isolate);
+ args[pos++] = jsgraph()->Constant(global);
+
+ // Convert WASM numbers to JS values.
+ for (int i = 0; i < wasm_count; i++) {
+ Node* param = graph()->NewNode(jsgraph()->common()->Parameter(i), start);
+ args[pos++] = ToJS(param, context, sig->GetParam(i));
+ }
+
+ if (add_new_target_undefined) {
+ args[pos++] = jsgraph()->UndefinedConstant(); // new target
+ }
+
+ if (!arg_count_before_args) {
+ args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
+ }
+ args[pos++] = context;
+ args[pos++] = *effect_;
+ args[pos++] = *control_;
+
+ Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+
+ // Convert the return value back.
+ Node* val =
+ FromJS(call, context,
+ sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
+ Node* ret = graph()->NewNode(jsgraph()->common()->Return(), val, call, start);
+
+ MergeControlToEnd(jsgraph(), ret);
+}
+
+
+Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
+ if (offset == 0) {
+ if (!mem_buffer_)
+ mem_buffer_ = jsgraph()->IntPtrConstant(module_->mem_start);
+ return mem_buffer_;
+ } else {
+ return jsgraph()->IntPtrConstant(module_->mem_start + offset);
+ }
+}
+
+
+Node* WasmGraphBuilder::MemSize(uint32_t offset) {
+ int32_t size = static_cast<int>(module_->mem_end - module_->mem_start);
+ if (offset == 0) {
+ if (!mem_size_) mem_size_ = jsgraph()->Int32Constant(size);
+ return mem_size_;
+ } else {
+ return jsgraph()->Int32Constant(size + offset);
+ }
+}
+
+
+Node* WasmGraphBuilder::FunctionTable() {
+ if (!function_table_) {
+ DCHECK(!module_->function_table.is_null());
+ function_table_ = jsgraph()->Constant(module_->function_table);
+ }
+ return function_table_;
+}
+
+
+Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
+ MachineType mem_type = module_->GetGlobalType(index);
+ Node* addr = jsgraph()->IntPtrConstant(
+ module_->globals_area + module_->module->globals->at(index).offset);
+ const Operator* op = jsgraph()->machine()->Load(mem_type);
+ Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
+ *control_);
+ *effect_ = node;
+ return node;
+}
+
+
+Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
+ MachineType mem_type = module_->GetGlobalType(index);
+ Node* addr = jsgraph()->IntPtrConstant(
+ module_->globals_area + module_->module->globals->at(index).offset);
+ const Operator* op = jsgraph()->machine()->Store(
+ StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
+ Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
+ *effect_, *control_);
+ *effect_ = node;
+ return node;
+}
+
+
+void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
+ uint32_t offset) {
+ // TODO(turbofan): fold bounds checks for constant indexes.
+ CHECK_GE(module_->mem_end, module_->mem_start);
+ ptrdiff_t size = module_->mem_end - module_->mem_start;
+ byte memsize = wasm::WasmOpcodes::MemSize(memtype);
+ Node* cond;
+ if (static_cast<ptrdiff_t>(offset) >= size ||
+ static_cast<ptrdiff_t>(offset + memsize) > size) {
+ // The access will always throw.
+ cond = jsgraph()->Int32Constant(0);
+ } else {
+ // Check against the limit.
+ size_t limit = size - offset - memsize;
+    CHECK_LE(limit, kMaxUInt32);
+ cond = graph()->NewNode(
+ jsgraph()->machine()->Uint32LessThanOrEqual(), index,
+ jsgraph()->Int32Constant(static_cast<uint32_t>(limit)));
+ }
+
+ trap_->AddTrapIfFalse(kTrapMemOutOfBounds, cond);
+}
+
+
+Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
+ Node* index, uint32_t offset) {
+ Node* load;
+
+ if (module_ && module_->asm_js) {
+    // asm.js semantics use CheckedLoad (i.e. OOB reads return 0 or NaN).
+ DCHECK_EQ(0, offset);
+ const Operator* op = jsgraph()->machine()->CheckedLoad(memtype);
+ load = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), *effect_,
+ *control_);
+ } else {
+ // WASM semantics throw on OOB. Introduce explicit bounds check.
+ BoundsCheckMem(memtype, index, offset);
+ load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
+ MemBuffer(offset), index, *effect_, *control_);
+ }
+
+ *effect_ = load;
+
+ if (type == wasm::kAstI64 &&
+ ElementSizeLog2Of(memtype.representation()) < 3) {
+ // TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes.
+ if (memtype.IsSigned()) {
+ // sign extend
+ load = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), load);
+ } else {
+ // zero extend
+ load =
+ graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), load);
+ }
+ }
+
+ return load;
+}
+
+
+Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
+ uint32_t offset, Node* val) {
+ Node* store;
+ if (module_ && module_->asm_js) {
+ // asm.js semantics use CheckedStore (i.e. ignore OOB writes).
+ DCHECK_EQ(0, offset);
+ const Operator* op =
+ jsgraph()->machine()->CheckedStore(memtype.representation());
+ store = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), val, *effect_,
+ *control_);
+ } else {
+ // WASM semantics throw on OOB. Introduce explicit bounds check.
+ BoundsCheckMem(memtype, index, offset);
+ StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+ store =
+ graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
+ index, val, *effect_, *control_);
+ }
+ *effect_ = store;
+ return store;
+}
+
+
+void WasmGraphBuilder::PrintDebugName(Node* node) {
+ PrintF("#%d:%s", node->id(), node->op()->mnemonic());
+}
+
+
+Node* WasmGraphBuilder::String(const char* string) {
+ return jsgraph()->Constant(
+ jsgraph()->isolate()->factory()->NewStringFromAsciiChecked(string));
+}
+
+
+Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
+
+
+Handle<JSFunction> CompileJSToWasmWrapper(
+ Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
+ Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index) {
+ wasm::WasmFunction* func = &module->module->functions->at(index);
+
+ //----------------------------------------------------------------------------
+ // Create the JSFunction object.
+ //----------------------------------------------------------------------------
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfo(name, wasm_code, false);
+ int params = static_cast<int>(func->sig->parameter_count());
+ shared->set_length(params);
+ shared->set_internal_formal_parameter_count(1 + params);
+ Handle<JSFunction> function = isolate->factory()->NewFunction(
+ isolate->wasm_function_map(), name, MaybeHandle<Code>());
+ function->SetInternalField(0, *module_object);
+ function->set_shared(*shared);
+
+ //----------------------------------------------------------------------------
+ // Create the Graph
+ //----------------------------------------------------------------------------
+ Zone zone;
+ Graph graph(&zone);
+ CommonOperatorBuilder common(&zone);
+ JSOperatorBuilder javascript(&zone);
+ MachineOperatorBuilder machine(&zone);
+ JSGraph jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine);
+
+ Node* control = nullptr;
+ Node* effect = nullptr;
+
+ WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+ builder.set_control_ptr(&control);
+ builder.set_effect_ptr(&effect);
+ builder.set_module(module);
+ builder.BuildJSToWasmWrapper(wasm_code, func->sig);
+
+ //----------------------------------------------------------------------------
+ // Run the compilation pipeline.
+ //----------------------------------------------------------------------------
+ {
+ // Changes lowering requires types.
+ Typer typer(isolate, &graph);
+ NodeVector roots(&zone);
+ jsgraph.GetCachedNodes(&roots);
+ typer.Run(roots);
+
+ // Run generic and change lowering.
+ JSGenericLowering generic(true, &jsgraph);
+ ChangeLowering changes(&jsgraph);
+ GraphReducer graph_reducer(&zone, &graph, jsgraph.Dead());
+ graph_reducer.AddReducer(&changes);
+ graph_reducer.AddReducer(&generic);
+ graph_reducer.ReduceGraph();
+
+ if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
+ os << "-- Graph after change lowering -- " << std::endl;
+ os << AsRPO(graph);
+ }
+
+ // Schedule and compile to machine code.
+ int params = static_cast<int>(
+ module->GetFunctionSignature(index)->parameter_count());
+ CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
+ &zone, false, params + 1, CallDescriptor::kNoFlags);
+ CompilationInfo info("js-to-wasm", isolate, &zone);
+ // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
+ info.set_output_code_kind(Code::WASM_FUNCTION);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+
+#ifdef ENABLE_DISASSEMBLER
+ // Disassemble the wrapper code for debugging.
+ if (!code.is_null() && FLAG_print_opt_code) {
+    EmbeddedVector<char, 128> buffer;
+ const char* name = "";
+ if (func->name_offset > 0) {
+ const byte* ptr = module->module->module_start + func->name_offset;
+ name = reinterpret_cast<const char*>(ptr);
+ }
+ SNPrintF(buffer, "JS->WASM function wrapper #%d:%s", index, name);
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
+#endif
+ // Set the JSFunction's machine code.
+ function->set_code(*code);
+ }
+ return function;
+}
+
+
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+ Handle<JSFunction> function,
+ uint32_t index) {
+ wasm::WasmFunction* func = &module->module->functions->at(index);
+
+ //----------------------------------------------------------------------------
+ // Create the Graph
+ //----------------------------------------------------------------------------
+ Zone zone;
+ Graph graph(&zone);
+ CommonOperatorBuilder common(&zone);
+ JSOperatorBuilder javascript(&zone);
+ MachineOperatorBuilder machine(&zone);
+ JSGraph jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine);
+
+ Node* control = nullptr;
+ Node* effect = nullptr;
+
+ WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+ builder.set_control_ptr(&control);
+ builder.set_effect_ptr(&effect);
+ builder.set_module(module);
+ builder.BuildWasmToJSWrapper(function, func->sig);
+
+ Handle<Code> code = Handle<Code>::null();
+ {
+ // Changes lowering requires types.
+ Typer typer(isolate, &graph);
+ NodeVector roots(&zone);
+ jsgraph.GetCachedNodes(&roots);
+ typer.Run(roots);
+
+ // Run generic and change lowering.
+ JSGenericLowering generic(true, &jsgraph);
+ ChangeLowering changes(&jsgraph);
+ GraphReducer graph_reducer(&zone, &graph, jsgraph.Dead());
+ graph_reducer.AddReducer(&changes);
+ graph_reducer.AddReducer(&generic);
+ graph_reducer.ReduceGraph();
+
+ if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
+ os << "-- Graph after change lowering -- " << std::endl;
+ os << AsRPO(graph);
+ }
+
+ // Schedule and compile to machine code.
+ CallDescriptor* incoming = module->GetWasmCallDescriptor(&zone, func->sig);
+ CompilationInfo info("wasm-to-js", isolate, &zone);
+ // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
+ info.set_output_code_kind(Code::WASM_FUNCTION);
+ code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+
+#ifdef ENABLE_DISASSEMBLER
+ // Disassemble the wrapper code for debugging.
+ if (!code.is_null() && FLAG_print_opt_code) {
+    EmbeddedVector<char, 128> buffer;
+ const char* name = "";
+ if (func->name_offset > 0) {
+ const byte* ptr = module->module->module_start + func->name_offset;
+ name = reinterpret_cast<const char*>(ptr);
+ }
+ SNPrintF(buffer, "WASM->JS function wrapper #%d:%s", index, name);
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
+#endif
+ }
+ return code;
+}
+
+
+// Helper function to compile a single function.
+Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction& function,
+ int index) {
+ if (FLAG_trace_wasm_compiler || FLAG_trace_wasm_decode_time) {
+ // TODO(titzer): clean me up a bit.
+ OFStream os(stdout);
+ os << "Compiling WASM function #" << index << ":";
+ if (function.name_offset > 0) {
+ os << module_env->module->GetName(function.name_offset);
+ }
+ os << std::endl;
+ }
+ // Initialize the function environment for decoding.
+ wasm::FunctionEnv env;
+ env.module = module_env;
+ env.sig = function.sig;
+ env.local_int32_count = function.local_int32_count;
+ env.local_int64_count = function.local_int64_count;
+ env.local_float32_count = function.local_float32_count;
+ env.local_float64_count = function.local_float64_count;
+ env.SumLocals();
+
+ // Create a TF graph during decoding.
+ Zone zone;
+ Graph graph(&zone);
+ CommonOperatorBuilder common(&zone);
+ MachineOperatorBuilder machine(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags());
+ JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ WasmGraphBuilder builder(&zone, &jsgraph, function.sig);
+ wasm::TreeResult result = wasm::BuildTFGraph(
+ &builder, &env, // --
+ module_env->module->module_start, // --
+ module_env->module->module_start + function.code_start_offset, // --
+ module_env->module->module_start + function.code_end_offset); // --
+
+ if (result.failed()) {
+ if (FLAG_trace_wasm_compiler) {
+ OFStream os(stdout);
+ os << "Compilation failed: " << result << std::endl;
+ }
+    // Add the function as another context for the exception.
+    EmbeddedVector<char, 128> buffer;
+ SNPrintF(buffer, "Compiling WASM function #%d:%s failed:", index,
+ module_env->module->GetName(function.name_offset));
+ thrower.Failed(buffer.start(), result);
+ return Handle<Code>::null();
+ }
+
+ // Run the compiler pipeline to generate machine code.
+ CallDescriptor* descriptor = const_cast<CallDescriptor*>(
+ module_env->GetWasmCallDescriptor(&zone, function.sig));
+ CompilationInfo info("wasm", isolate, &zone);
+ info.set_output_code_kind(Code::WASM_FUNCTION);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(&info, descriptor, &graph);
+
+#ifdef ENABLE_DISASSEMBLER
+ // Disassemble the code for debugging.
+ if (!code.is_null() && FLAG_print_opt_code) {
+    EmbeddedVector<char, 128> buffer;
+ const char* name = "";
+ if (function.name_offset > 0) {
+ const byte* ptr = module_env->module->module_start + function.name_offset;
+ name = reinterpret_cast<const char*>(ptr);
+ }
+ SNPrintF(buffer, "WASM function #%d:%s", index, name);
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
+#endif
+ return code;
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
new file mode 100644
index 0000000..1a17a83
--- /dev/null
+++ b/src/compiler/wasm-compiler.h
@@ -0,0 +1,190 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_WASM_COMPILER_H_
+#define V8_COMPILER_WASM_COMPILER_H_
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/wasm/wasm-opcodes.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+// Forward declarations for some compiler data structures.
+class Node;
+class JSGraph;
+class Graph;
+}
+
+namespace wasm {
+// Forward declarations for some WASM data structures.
+struct ModuleEnv;
+struct WasmFunction;
+class ErrorThrower;
+
+// Expose {Node} and {Graph} opaquely as {wasm::TFNode} and {wasm::TFGraph}.
+typedef compiler::Node TFNode;
+typedef compiler::JSGraph TFGraph;
+}
+
+namespace compiler {
+// Compiles a single function, producing a code object.
+Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction& function, int index);
+
+// Wraps a JS function, producing a code object that can be called from WASM.
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+ Handle<JSFunction> function,
+ uint32_t index);
+
+// Wraps a given wasm code object, producing a JSFunction that can be called
+// from JavaScript.
+Handle<JSFunction> CompileJSToWasmWrapper(
+ Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
+ Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index);
+
+// Abstracts details of building TurboFan graph nodes for WASM to separate
+// the WASM decoder from the internal details of TurboFan.
+class WasmTrapHelper;
+class WasmGraphBuilder {
+ public:
+ WasmGraphBuilder(Zone* z, JSGraph* g, wasm::FunctionSig* function_signature);
+
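+  // Returns a scratch buffer with room for at least {count} nodes. The
+  // buffer only ever grows and its previous contents are not preserved;
+  // see {Realloc} below for the copying variant.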
+ Node** Buffer(size_t count) {
+ if (count > cur_bufsize_) {
+ size_t new_size = count + cur_bufsize_ + 5;
+ cur_buffer_ =
+ reinterpret_cast<Node**>(zone_->New(new_size * sizeof(Node*)));
+ cur_bufsize_ = new_size;
+ }
+ return cur_buffer_;
+ }
+
+ //-----------------------------------------------------------------------
+ // Operations independent of {control} or {effect}.
+ //-----------------------------------------------------------------------
+ Node* Error();
+ Node* Start(unsigned params);
+ Node* Param(unsigned index, wasm::LocalType type);
+ Node* Loop(Node* entry);
+ Node* Terminate(Node* effect, Node* control);
+ Node* Merge(unsigned count, Node** controls);
+ Node* Phi(wasm::LocalType type, unsigned count, Node** vals, Node* control);
+ Node* EffectPhi(unsigned count, Node** effects, Node* control);
+ Node* Int32Constant(int32_t value);
+ Node* Int64Constant(int64_t value);
+ Node* Float32Constant(float value);
+ Node* Float64Constant(double value);
+ Node* Constant(Handle<Object> value);
+ Node* Binop(wasm::WasmOpcode opcode, Node* left, Node* right);
+ Node* Unop(wasm::WasmOpcode opcode, Node* input);
+ unsigned InputCount(Node* node);
+ bool IsPhiWithMerge(Node* phi, Node* merge);
+ void AppendToMerge(Node* merge, Node* from);
+ void AppendToPhi(Node* merge, Node* phi, Node* from);
+
+ //-----------------------------------------------------------------------
+ // Operations that read and/or write {control} and {effect}.
+ //-----------------------------------------------------------------------
+ Node* Branch(Node* cond, Node** true_node, Node** false_node);
+ Node* Switch(unsigned count, Node* key);
+ Node* IfValue(int32_t value, Node* sw);
+ Node* IfDefault(Node* sw);
+ Node* Return(unsigned count, Node** vals);
+ Node* ReturnVoid();
+ Node* Unreachable();
+
+ Node* CallDirect(uint32_t index, Node** args);
+ Node* CallIndirect(uint32_t index, Node** args);
+ void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
+ void BuildWasmToJSWrapper(Handle<JSFunction> function,
+ wasm::FunctionSig* sig);
+ Node* ToJS(Node* node, Node* context, wasm::LocalType type);
+ Node* FromJS(Node* node, Node* context, wasm::LocalType type);
+ Node* Invert(Node* node);
+ Node* FunctionTable();
+
+ //-----------------------------------------------------------------------
+ // Operations that concern the linear memory.
+ //-----------------------------------------------------------------------
+ Node* MemSize(uint32_t offset);
+ Node* LoadGlobal(uint32_t index);
+ Node* StoreGlobal(uint32_t index, Node* val);
+ Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
+ uint32_t offset);
+ Node* StoreMem(MachineType type, Node* index, uint32_t offset, Node* val);
+
+ static void PrintDebugName(Node* node);
+
+ Node* Control() { return *control_; }
+ Node* Effect() { return *effect_; }
+
+ void set_module(wasm::ModuleEnv* module) { this->module_ = module; }
+
+ void set_control_ptr(Node** control) { this->control_ = control; }
+
+ void set_effect_ptr(Node** effect) { this->effect_ = effect; }
+
+ wasm::FunctionSig* GetFunctionSignature() { return function_signature_; }
+
+ private:
+ static const int kDefaultBufferSize = 16;
+ friend class WasmTrapHelper;
+
+ Zone* zone_;
+ JSGraph* jsgraph_;
+ wasm::ModuleEnv* module_;
+ Node* mem_buffer_;
+ Node* mem_size_;
+ Node* function_table_;
+ Node** control_;
+ Node** effect_;
+ Node** cur_buffer_;
+ size_t cur_bufsize_;
+ Node* def_buffer_[kDefaultBufferSize];
+
+ WasmTrapHelper* trap_;
+ wasm::FunctionSig* function_signature_;
+
+ // Internal helper methods.
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph();
+
+ Node* String(const char* string);
+ Node* MemBuffer(uint32_t offset);
+ void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset);
+
+ Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args);
+ Node* BuildF32Neg(Node* input);
+ Node* BuildF64Neg(Node* input);
+ Node* BuildF32CopySign(Node* left, Node* right);
+ Node* BuildF64CopySign(Node* left, Node* right);
+ Node* BuildF32Min(Node* left, Node* right);
+ Node* BuildF32Max(Node* left, Node* right);
+ Node* BuildF64Min(Node* left, Node* right);
+ Node* BuildF64Max(Node* left, Node* right);
+ Node* BuildI32SConvertF32(Node* input);
+ Node* BuildI32SConvertF64(Node* input);
+ Node* BuildI32UConvertF32(Node* input);
+ Node* BuildI32UConvertF64(Node* input);
+ Node* BuildI32Ctz(Node* input);
+ Node* BuildI32Popcnt(Node* input);
+ Node* BuildI64Ctz(Node* input);
+ Node* BuildI64Popcnt(Node* input);
+
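+  // Like {Buffer}, but copies {count} entries from the old buffer whenever
+  // a new, larger buffer has to be allocated.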
+ Node** Realloc(Node** buffer, size_t count) {
+ Node** buf = Buffer(count);
+ if (buf != buffer) memcpy(buf, buffer, count * sizeof(Node*));
+ return buf;
+ }
+};
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_WASM_COMPILER_H_
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
new file mode 100644
index 0000000..92363dd
--- /dev/null
+++ b/src/compiler/wasm-linkage.cc
@@ -0,0 +1,282 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+
+#include "src/wasm/wasm-module.h"
+
+#include "src/compiler/linkage.h"
+
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+// TODO(titzer): this should not be in the WASM namespace.
+namespace wasm {
+
+using compiler::LocationSignature;
+using compiler::CallDescriptor;
+using compiler::LinkageLocation;
+
+namespace {
+MachineType MachineTypeFor(LocalType type) {
+ switch (type) {
+ case kAstI32:
+ return MachineType::Int32();
+ case kAstI64:
+ return MachineType::Int64();
+ case kAstF64:
+ return MachineType::Float64();
+ case kAstF32:
+ return MachineType::Float32();
+ default:
+ UNREACHABLE();
+ return MachineType::AnyTagged();
+ }
+}
+
+
+// Platform-specific configuration for C calling convention.
+LinkageLocation regloc(Register reg) {
+ return LinkageLocation::ForRegister(reg.code());
+}
+
+
+LinkageLocation regloc(DoubleRegister reg) {
+ return LinkageLocation::ForRegister(reg.code());
+}
+
+
+LinkageLocation stackloc(int i) {
+ return LinkageLocation::ForCallerFrameSlot(i);
+}
+
+
+#if V8_TARGET_ARCH_IA32
+// ===========================================================================
+// == ia32 ===================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi, edi
+#define GP_RETURN_REGISTERS eax, edx
+#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
+#define FP_RETURN_REGISTERS xmm1, xmm2
+
+#elif V8_TARGET_ARCH_X64
+// ===========================================================================
+// == x64 ====================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS rax, rdx, rcx, rbx, rsi, rdi
+#define GP_RETURN_REGISTERS rax, rdx
+#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
+#define FP_RETURN_REGISTERS xmm1, xmm2
+
+#elif V8_TARGET_ARCH_X87
+// ===========================================================================
+// == x87 ====================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi, edi
+#define GP_RETURN_REGISTERS eax, edx
+#define FP_RETURN_REGISTERS stX_0
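+// Note: no FP_PARAM_REGISTERS on x87; floating-point parameters are passed
+// on the stack via the allocator fallback below.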
+
+#elif V8_TARGET_ARCH_ARM
+// ===========================================================================
+// == arm ====================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS r0, r1, r2, r3
+#define GP_RETURN_REGISTERS r0, r1
+#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
+#define FP_RETURN_REGISTERS d0, d1
+
+#elif V8_TARGET_ARCH_ARM64
+// ===========================================================================
+// == arm64 ==================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS x0, x1, x2, x3, x4, x5, x6, x7
+#define GP_RETURN_REGISTERS x0, x1
+#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
+#define FP_RETURN_REGISTERS d0, d1
+
+#elif V8_TARGET_ARCH_MIPS
+// ===========================================================================
+// == mips ===================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS a0, a1, a2, a3
+#define GP_RETURN_REGISTERS v0, v1
+#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
+#define FP_RETURN_REGISTERS f2, f4
+
+#elif V8_TARGET_ARCH_MIPS64
+// ===========================================================================
+// == mips64 =================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
+#define GP_RETURN_REGISTERS v0, v1
+#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
+#define FP_RETURN_REGISTERS f2, f4
+
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+// ===========================================================================
+// == ppc & ppc64 ============================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS r3, r4, r5, r6, r7, r8, r9, r10
+#define GP_RETURN_REGISTERS r3, r4
+#define FP_PARAM_REGISTERS d1, d2, d3, d4, d5, d6, d7, d8
+#define FP_RETURN_REGISTERS d1, d2
+
+#else
+// ===========================================================================
+// == unknown ================================================================
+// ===========================================================================
+// Don't define anything. We'll just always use the stack.
+#endif
+
+
+// Helper for allocating either a GP or FP register, or the next stack slot.
+struct Allocator {
+ Allocator(const Register* gp, int gpc, const DoubleRegister* fp, int fpc)
+ : gp_count(gpc),
+ gp_offset(0),
+ gp_regs(gp),
+ fp_count(fpc),
+ fp_offset(0),
+ fp_regs(fp),
+ stack_offset(0) {}
+
+ int gp_count;
+ int gp_offset;
+ const Register* gp_regs;
+
+ int fp_count;
+ int fp_offset;
+ const DoubleRegister* fp_regs;
+
+ int stack_offset;
+
+ LinkageLocation Next(LocalType type) {
+ if (IsFloatingPoint(type)) {
+ // Allocate a floating point register/stack location.
+ if (fp_offset < fp_count) {
+ return regloc(fp_regs[fp_offset++]);
+ } else {
+ int offset = -1 - stack_offset;
+ stack_offset += Words(type);
+ return stackloc(offset);
+ }
+ } else {
+ // Allocate a general purpose register/stack location.
+ if (gp_offset < gp_count) {
+ return regloc(gp_regs[gp_offset++]);
+ } else {
+ int offset = -1 - stack_offset;
+ stack_offset += Words(type);
+ return stackloc(offset);
+ }
+ }
+ }
+ bool IsFloatingPoint(LocalType type) {
+ return type == kAstF32 || type == kAstF64;
+ }
+ int Words(LocalType type) {
+    // The code generation for pushing parameters on the stack does not
+    // distinguish between float32 and float64, so a float32 also occupies
+    // two words.
+ if (kPointerSize < 8 &&
+ (type == kAstI64 || type == kAstF64 || type == kAstF32)) {
+ return 2;
+ }
+ return 1;
+ }
+};
+} // namespace
+
+
+// General code uses the above configuration data.
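+// For example, on x64 a signature (i32, f64) -> f32 is assigned rax for the
+// i32 parameter, xmm1 for the f64 parameter, and xmm1 for the return value,
+// per the register tables above.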
+CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
+ FunctionSig* fsig) {
+ MachineSignature::Builder msig(zone, fsig->return_count(),
+ fsig->parameter_count());
+ LocationSignature::Builder locations(zone, fsig->return_count(),
+ fsig->parameter_count());
+
+#ifdef GP_RETURN_REGISTERS
+ static const Register kGPReturnRegisters[] = {GP_RETURN_REGISTERS};
+ static const int kGPReturnRegistersCount =
+ static_cast<int>(arraysize(kGPReturnRegisters));
+#else
+ static const Register* kGPReturnRegisters = nullptr;
+ static const int kGPReturnRegistersCount = 0;
+#endif
+
+#ifdef FP_RETURN_REGISTERS
+ static const DoubleRegister kFPReturnRegisters[] = {FP_RETURN_REGISTERS};
+ static const int kFPReturnRegistersCount =
+ static_cast<int>(arraysize(kFPReturnRegisters));
+#else
+ static const DoubleRegister* kFPReturnRegisters = nullptr;
+ static const int kFPReturnRegistersCount = 0;
+#endif
+
+ Allocator rets(kGPReturnRegisters, kGPReturnRegistersCount,
+ kFPReturnRegisters, kFPReturnRegistersCount);
+
+ // Add return location(s).
+ const int return_count = static_cast<int>(locations.return_count_);
+ for (int i = 0; i < return_count; i++) {
+ LocalType ret = fsig->GetReturn(i);
+ msig.AddReturn(MachineTypeFor(ret));
+ locations.AddReturn(rets.Next(ret));
+ }
+
+#ifdef GP_PARAM_REGISTERS
+ static const Register kGPParamRegisters[] = {GP_PARAM_REGISTERS};
+ static const int kGPParamRegistersCount =
+ static_cast<int>(arraysize(kGPParamRegisters));
+#else
+ static const Register* kGPParamRegisters = nullptr;
+ static const int kGPParamRegistersCount = 0;
+#endif
+
+#ifdef FP_PARAM_REGISTERS
+ static const DoubleRegister kFPParamRegisters[] = {FP_PARAM_REGISTERS};
+ static const int kFPParamRegistersCount =
+ static_cast<int>(arraysize(kFPParamRegisters));
+#else
+ static const DoubleRegister* kFPParamRegisters = nullptr;
+ static const int kFPParamRegistersCount = 0;
+#endif
+
+ Allocator params(kGPParamRegisters, kGPParamRegistersCount, kFPParamRegisters,
+ kFPParamRegistersCount);
+
+ // Add register and/or stack parameter(s).
+ const int parameter_count = static_cast<int>(fsig->parameter_count());
+ for (int i = 0; i < parameter_count; i++) {
+ LocalType param = fsig->GetParam(i);
+ msig.AddParam(MachineTypeFor(param));
+ locations.AddParam(params.Next(param));
+ }
+
+ const RegList kCalleeSaveRegisters = 0;
+ const RegList kCalleeSaveFPRegisters = 0;
+
+ // The target for WASM calls is always a code object.
+ MachineType target_type = MachineType::AnyTagged();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ msig.Build(), // machine_sig
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ compiler::Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ CallDescriptor::kUseNativeStack, // flags
+ "c-call");
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index 0480f9d..be406fb 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -4,11 +4,11 @@
#include "src/compiler/code-generator.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/scopes.h"
+#include "src/compiler/osr.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -19,33 +19,44 @@
#define __ masm()->
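+// Fixed scratch double register for the x64 code generator.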
+#define kScratchDoubleReg xmm0
+
+
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
public:
X64OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- Immediate InputImmediate(int index) {
+ Immediate InputImmediate(size_t index) {
return ToImmediate(instr_->InputAt(index));
}
- Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+ Operand InputOperand(size_t index, int extra = 0) {
+ return ToOperand(instr_->InputAt(index), extra);
+ }
Operand OutputOperand() { return ToOperand(instr_->Output()); }
Immediate ToImmediate(InstructionOperand* operand) {
- return Immediate(ToConstant(operand).ToInt32());
+ Constant constant = ToConstant(operand);
+ if (constant.type() == Constant::kFloat64) {
+ DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
+ return Immediate(0);
+ }
+ return Immediate(constant.ToInt32());
}
Operand ToOperand(InstructionOperand* op, int extra = 0) {
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
- return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
+ return Operand(offset.from_stack_pointer() ? rsp : rbp,
+ offset.offset() + extra);
}
- static int NextOffset(int* offset) {
- int i = *offset;
+ static size_t NextOffset(size_t* offset) {
+ size_t i = *offset;
(*offset)++;
return i;
}
@@ -60,7 +71,7 @@
return static_cast<ScaleFactor>(scale);
}
- Operand MemoryOperand(int* offset) {
+ Operand MemoryOperand(size_t* offset) {
AddressingMode mode = AddressingModeField::decode(instr_->opcode());
switch (mode) {
case kMode_MR: {
@@ -125,7 +136,7 @@
return Operand(no_reg, 0);
}
- Operand MemoryOperand(int first_input = 0) {
+ Operand MemoryOperand(size_t first_input = 0) {
return MemoryOperand(&first_input);
}
};
@@ -133,44 +144,44 @@
namespace {
-bool HasImmediateInput(Instruction* instr, int index) {
+bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
-class OutOfLineLoadZero FINAL : public OutOfLineCode {
+class OutOfLineLoadZero final : public OutOfLineCode {
public:
OutOfLineLoadZero(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL { __ xorl(result_, result_); }
+ void Generate() final { __ xorl(result_, result_); }
private:
Register const result_;
};
-class OutOfLineLoadNaN FINAL : public OutOfLineCode {
+class OutOfLineLoadNaN final : public OutOfLineCode {
public:
OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL { __ pcmpeqd(result_, result_); }
+ void Generate() final { __ Pcmpeqd(result_, result_); }
private:
XMMRegister const result_;
};
-class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
+class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
XMMRegister input)
: OutOfLineCode(gen), result_(result), input_(input) {}
- void Generate() FINAL {
+ void Generate() final {
__ subp(rsp, Immediate(kDoubleSize));
- __ movsd(MemOperand(rsp, 0), input_);
+ __ Movsd(MemOperand(rsp, 0), input_);
__ SlowTruncateToI(result_, rsp, 0);
__ addp(rsp, Immediate(kDoubleSize));
}
@@ -180,6 +191,46 @@
XMMRegister const input_;
};
+
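+// Out-of-line code for kArchStoreWithWriteBarrier: filters out smi and
+// uninteresting values, computes the slot address, and calls the record
+// write stub.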
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ operand_(operand),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ leap(scratch1_, operand_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Operand const operand_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
} // namespace
@@ -249,7 +300,19 @@
} while (0)
-#define ASSEMBLE_DOUBLE_BINOP(asm_instr) \
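+// Emits a sign- or zero-extending move: from a memory operand when the
+// instruction has an addressing mode, otherwise from a register or stack
+// operand input.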
+#define ASSEMBLE_MOVX(asm_instr) \
+ do { \
+ if (instr->addressing_mode() != kMode_None) { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ } else if (instr->InputAt(0)->IsRegister()) { \
+ __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
+ } else { \
+ __ asm_instr(i.OutputRegister(), i.InputOperand(0)); \
+ } \
+ } while (0)
+
+
+#define ASSEMBLE_SSE_BINOP(asm_instr) \
do { \
if (instr->InputAt(1)->IsDoubleRegister()) { \
__ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
@@ -259,7 +322,17 @@
} while (0)
-#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr) \
+#define ASSEMBLE_SSE_UNOP(asm_instr) \
+ do { \
+ if (instr->InputAt(0)->IsDoubleRegister()) { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ } else { \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0)); \
+ } \
+ } while (0)
+
+
+#define ASSEMBLE_AVX_BINOP(asm_instr) \
do { \
CpuFeatureScope avx_scope(masm(), AVX); \
if (instr->InputAt(1)->IsDoubleRegister()) { \
@@ -288,7 +361,7 @@
auto length = i.InputInt32(3); \
DCHECK_LE(index2, length); \
__ cmpq(index1, Immediate(length - index2)); \
- class OutOfLineLoadFloat FINAL : public OutOfLineCode { \
+ class OutOfLineLoadFloat final : public OutOfLineCode { \
public: \
OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
Register buffer, Register index1, int32_t index2, \
@@ -300,9 +373,9 @@
index2_(index2), \
length_(length) {} \
\
- void Generate() FINAL { \
+ void Generate() final { \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ pcmpeqd(result_, result_); \
+ __ Pcmpeqd(result_, result_); \
__ cmpl(kScratchRegister, Immediate(length_)); \
__ j(above_equal, exit()); \
__ asm_instr(result_, \
@@ -341,7 +414,7 @@
auto length = i.InputInt32(3); \
DCHECK_LE(index2, length); \
__ cmpq(index1, Immediate(length - index2)); \
- class OutOfLineLoadInteger FINAL : public OutOfLineCode { \
+ class OutOfLineLoadInteger final : public OutOfLineCode { \
public: \
OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
Register buffer, Register index1, int32_t index2, \
@@ -353,7 +426,7 @@
index2_(index2), \
length_(length) {} \
\
- void Generate() FINAL { \
+ void Generate() final { \
Label oob; \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
__ cmpl(kScratchRegister, Immediate(length_)); \
@@ -399,7 +472,7 @@
auto length = i.InputInt32(3); \
DCHECK_LE(index2, length); \
__ cmpq(index1, Immediate(length - index2)); \
- class OutOfLineStoreFloat FINAL : public OutOfLineCode { \
+ class OutOfLineStoreFloat final : public OutOfLineCode { \
public: \
OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
Register index1, int32_t index2, int32_t length, \
@@ -411,7 +484,7 @@
length_(length), \
value_(value) {} \
\
- void Generate() FINAL { \
+ void Generate() final { \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
__ cmpl(kScratchRegister, Immediate(length_)); \
__ j(above_equal, exit()); \
@@ -452,7 +525,7 @@
auto length = i.InputInt32(3); \
DCHECK_LE(index2, length); \
__ cmpq(index1, Immediate(length - index2)); \
- class OutOfLineStoreInteger FINAL : public OutOfLineCode { \
+ class OutOfLineStoreInteger final : public OutOfLineCode { \
public: \
OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
Register index1, int32_t index2, int32_t length, \
@@ -464,7 +537,7 @@
length_(length), \
value_(value) {} \
\
- void Generate() FINAL { \
+ void Generate() final { \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
__ cmpl(kScratchRegister, Immediate(length_)); \
__ j(above_equal, exit()); \
@@ -500,6 +573,28 @@
} while (false)
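+// Drops stack slots that were reserved for outgoing parameters which the
+// tail-called function does not need, and restores default frame access.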
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ addq(rsp, Immediate(sp_slot_delta * kPointerSize));
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
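+// Reserves extra stack slots for the tail call's outgoing parameters if
+// needed, and switches frame accesses to be rsp-relative, since the caller's
+// rbp is reloaded before the jump.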
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ subq(rsp, Immediate(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ movq(rbp, MemOperand(rbp, 0));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
+
+
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
X64OperandConverter i(this, instr);
@@ -512,10 +607,25 @@
__ Call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
- int entry = Code::kHeaderSize - kHeapObjectTag;
- __ Call(Operand(reg, entry));
+ __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(reg);
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallCodeObject: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ if (HasImmediateInput(instr, 0)) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ jmp(code, RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(reg);
+ }
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -527,31 +637,109 @@
__ Assert(equal, kWrongFunctionContext);
}
__ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
- AddSafepointAndDeopt(instr);
+ frame_access_state()->ClearSPDelta();
+ RecordCallPosition(instr);
+ break;
+ }
+ case kArchTailCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
+ __ Assert(equal, kWrongFunctionContext);
+ }
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters);
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ break;
+ }
case kArchRet:
AssembleReturn();
break;
case kArchStackPointer:
__ movq(i.OutputRegister(), rsp);
break;
+ case kArchFramePointer:
+ __ movq(i.OutputRegister(), rbp);
+ break;
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
- __ cvttsd2siq(result, input);
+ __ Cvttsd2siq(result, input);
__ cmpq(result, Immediate(1));
__ j(overflow, ool->entry());
__ bind(ool->exit());
break;
}
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ Register value = i.InputRegister(index);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
+ scratch0, scratch1, mode);
+ __ movp(operand, value);
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ not_zero, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kX64Add32:
ASSEMBLE_BINOP(addl);
break;
@@ -666,27 +854,123 @@
case kX64Ror:
ASSEMBLE_SHIFT(rorq, 6);
break;
+ case kX64Lzcnt:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Lzcntq(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Lzcntq(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64Lzcnt32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64Tzcnt:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Tzcntq(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Tzcntq(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64Tzcnt32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Tzcntl(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Tzcntl(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64Popcnt:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Popcntq(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Popcntq(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64Popcnt32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Popcntl(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Popcntl(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kSSEFloat32Cmp:
+ ASSEMBLE_SSE_BINOP(Ucomiss);
+ break;
+ case kSSEFloat32Add:
+ ASSEMBLE_SSE_BINOP(addss);
+ break;
+ case kSSEFloat32Sub:
+ ASSEMBLE_SSE_BINOP(subss);
+ break;
+ case kSSEFloat32Mul:
+ ASSEMBLE_SSE_BINOP(mulss);
+ break;
+ case kSSEFloat32Div:
+ ASSEMBLE_SSE_BINOP(divss);
+      // Don't delete this mov. It may improve performance on some CPUs when
+      // there is a (v)mulss depending on the result.
+ __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ break;
+ case kSSEFloat32Abs: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 33);
+ __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kSSEFloat32Neg: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 31);
+ __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kSSEFloat32Sqrt:
+ ASSEMBLE_SSE_UNOP(sqrtss);
+ break;
+ case kSSEFloat32Max:
+ ASSEMBLE_SSE_BINOP(maxss);
+ break;
+ case kSSEFloat32Min:
+ ASSEMBLE_SSE_BINOP(minss);
+ break;
+ case kSSEFloat32ToFloat64:
+ ASSEMBLE_SSE_UNOP(Cvtss2sd);
+ break;
+ case kSSEFloat32Round: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ break;
+ }
case kSSEFloat64Cmp:
- ASSEMBLE_DOUBLE_BINOP(ucomisd);
+ ASSEMBLE_SSE_BINOP(Ucomisd);
break;
case kSSEFloat64Add:
- ASSEMBLE_DOUBLE_BINOP(addsd);
+ ASSEMBLE_SSE_BINOP(addsd);
break;
case kSSEFloat64Sub:
- ASSEMBLE_DOUBLE_BINOP(subsd);
+ ASSEMBLE_SSE_BINOP(subsd);
break;
case kSSEFloat64Mul:
- ASSEMBLE_DOUBLE_BINOP(mulsd);
+ ASSEMBLE_SSE_BINOP(mulsd);
break;
case kSSEFloat64Div:
- ASSEMBLE_DOUBLE_BINOP(divsd);
+ ASSEMBLE_SSE_BINOP(divsd);
+      // Don't delete this mov. It may improve performance on some CPUs when
+      // there is a (v)mulsd depending on the result.
+ __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kSSEFloat64Mod: {
__ subq(rsp, Immediate(kDoubleSize));
// Move values to st(0) and st(1).
- __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
+ __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
__ fld_d(Operand(rsp, 0));
- __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
+ __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
__ fld_d(Operand(rsp, 0));
// Loop while fprem isn't done.
Label mod_loop;
@@ -709,107 +993,411 @@
// Move output to stack and clean up.
__ fstp(1);
__ fstp_d(Operand(rsp, 0));
- __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
+ __ Movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
__ addq(rsp, Immediate(kDoubleSize));
break;
}
+ case kSSEFloat64Max:
+ ASSEMBLE_SSE_BINOP(maxsd);
+ break;
+ case kSSEFloat64Min:
+ ASSEMBLE_SSE_BINOP(minsd);
+ break;
+ case kSSEFloat64Abs: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 1);
+ __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kSSEFloat64Neg: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 63);
+ __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
case kSSEFloat64Sqrt:
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- } else {
- __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
- }
+ ASSEMBLE_SSE_UNOP(sqrtsd);
break;
- case kSSEFloat64Floor: {
+ case kSSEFloat64Round: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundDown);
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
- case kSSEFloat64Ceil: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundUp);
- break;
- }
- case kSSEFloat64RoundTruncate: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundToZero);
- break;
- }
- case kSSECvtss2sd:
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- } else {
- __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
- }
- break;
- case kSSECvtsd2ss:
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- } else {
- __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
- }
+ case kSSEFloat64ToFloat32:
+ ASSEMBLE_SSE_UNOP(Cvtsd2ss);
break;
case kSSEFloat64ToInt32:
if (instr->InputAt(0)->IsDoubleRegister()) {
- __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
- __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+ __ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
}
break;
case kSSEFloat64ToUint32: {
if (instr->InputAt(0)->IsDoubleRegister()) {
- __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
- __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
+ __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
}
__ AssertZeroExtended(i.OutputRegister());
break;
}
+ case kSSEFloat32ToInt64:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
+ }
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 1);
+ Label done;
+ Label fail;
+ __ Move(kScratchDoubleReg, static_cast<float>(INT64_MIN));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Ucomiss(kScratchDoubleReg, i.InputDoubleRegister(0));
+ } else {
+ __ Ucomiss(kScratchDoubleReg, i.InputOperand(0));
+ }
+ // If the input is NaN, then the conversion fails.
+ __ j(parity_even, &fail);
+ // If the input is INT64_MIN, then the conversion succeeds.
+ __ j(equal, &done);
+ __ cmpq(i.OutputRegister(0), Immediate(1));
+ // If the conversion results in INT64_MIN, but the input was not
+ // INT64_MIN, then the conversion fails.
+ __ j(no_overflow, &done);
+ __ bind(&fail);
+ __ Set(i.OutputRegister(1), 0);
+ __ bind(&done);
+ }
+ break;
+ case kSSEFloat64ToInt64:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttsd2siq(i.OutputRegister(0), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
+ }
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 1);
+ Label done;
+ Label fail;
+ __ Move(kScratchDoubleReg, static_cast<double>(INT64_MIN));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Ucomisd(kScratchDoubleReg, i.InputDoubleRegister(0));
+ } else {
+ __ Ucomisd(kScratchDoubleReg, i.InputOperand(0));
+ }
+ // If the input is NaN, then the conversion fails.
+ __ j(parity_even, &fail);
+ // If the input is INT64_MIN, then the conversion succeeds.
+ __ j(equal, &done);
+ __ cmpq(i.OutputRegister(0), Immediate(1));
+ // If the conversion results in INT64_MIN, but the input was not
+ // INT64_MIN, then the conversion fails.
+ __ j(no_overflow, &done);
+ __ bind(&fail);
+ __ Set(i.OutputRegister(1), 0);
+ __ bind(&done);
+ }
+ break;
+ case kSSEFloat32ToUint64: {
+ Label done;
+ Label success;
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 0);
+ }
+      // There is no Float32ToUint64 instruction, so we have to use the
+      // Float32ToInt64 instruction instead.
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
+ }
+      // If the result of the Float32ToInt64 conversion is positive, we are
+      // already done.
+ __ testq(i.OutputRegister(), i.OutputRegister());
+ __ j(positive, &success);
+      // The result of the first conversion was negative, which means that the
+      // input value was not within the positive int64 range. We subtract 2^63
+      // and convert it again to see if it is within the uint64 range.
+ __ Move(kScratchDoubleReg, -9223372036854775808.0f);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ addss(kScratchDoubleReg, i.InputDoubleRegister(0));
+ } else {
+ __ addss(kScratchDoubleReg, i.InputOperand(0));
+ }
+ __ Cvttss2siq(i.OutputRegister(), kScratchDoubleReg);
+ __ testq(i.OutputRegister(), i.OutputRegister());
+      // The only possible negative value here is 0x8000000000000000, which is
+      // used on x64 to indicate an integer overflow.
+ __ j(negative, &done);
+      // The input value is within uint64 range and the second conversion
+      // succeeded, but we still have to undo the subtraction we did earlier.
+ __ Set(kScratchRegister, 0x8000000000000000);
+ __ orq(i.OutputRegister(), kScratchRegister);
+ __ bind(&success);
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 1);
+ }
+ __ bind(&done);
+ break;
+ }
+ case kSSEFloat64ToUint64: {
+ Label done;
+ Label success;
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 0);
+ }
+      // There is no Float64ToUint64 instruction, so we have to use the
+      // Float64ToInt64 instruction instead.
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
+ }
+      // If the result of the Float64ToInt64 conversion is positive, we are
+      // already done.
+ __ testq(i.OutputRegister(), i.OutputRegister());
+ __ j(positive, &success);
+      // The result of the first conversion was negative, which means that the
+      // input value was not within the positive int64 range. We subtract 2^63
+      // and convert it again to see if it is within the uint64 range.
+ __ Move(kScratchDoubleReg, -9223372036854775808.0);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ addsd(kScratchDoubleReg, i.InputDoubleRegister(0));
+ } else {
+ __ addsd(kScratchDoubleReg, i.InputOperand(0));
+ }
+ __ Cvttsd2siq(i.OutputRegister(), kScratchDoubleReg);
+ __ testq(i.OutputRegister(), i.OutputRegister());
+      // The only possible negative value here is 0x8000000000000000, which is
+      // used on x64 to indicate an integer overflow.
+ __ j(negative, &done);
+      // The input value is within uint64 range and the second conversion
+      // succeeded, but we still have to undo the subtraction we did earlier.
+ __ Set(kScratchRegister, 0x8000000000000000);
+ __ orq(i.OutputRegister(), kScratchRegister);
+ __ bind(&success);
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 1);
+ }
+ __ bind(&done);
+ break;
+ }
case kSSEInt32ToFloat64:
if (instr->InputAt(0)->IsRegister()) {
- __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
- __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
+ case kSSEInt64ToFloat32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
+ case kSSEInt64ToFloat64:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
+ case kSSEUint64ToFloat32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movq(kScratchRegister, i.InputRegister(0));
+ } else {
+ __ movq(kScratchRegister, i.InputOperand(0));
+ }
+ __ Cvtqui2ss(i.OutputDoubleRegister(), kScratchRegister,
+ i.TempRegister(0));
+ break;
+ case kSSEUint64ToFloat64:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movq(kScratchRegister, i.InputRegister(0));
+ } else {
+ __ movq(kScratchRegister, i.InputOperand(0));
+ }
+ __ Cvtqui2sd(i.OutputDoubleRegister(), kScratchRegister,
+ i.TempRegister(0));
+ break;
case kSSEUint32ToFloat64:
if (instr->InputAt(0)->IsRegister()) {
__ movl(kScratchRegister, i.InputRegister(0));
} else {
__ movl(kScratchRegister, i.InputOperand(0));
}
- __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
+ __ Cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
break;
+ case kSSEFloat64ExtractLowWord32:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ movl(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kSSEFloat64ExtractHighWord32:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
+ } else {
+ __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
+ }
+ break;
+ case kSSEFloat64InsertLowWord32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
+ } else {
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
+ }
+ break;
+ case kSSEFloat64InsertHighWord32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
+ } else {
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
+ }
+ break;
+ case kSSEFloat64LoadLowWord32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ Movd(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
+ case kAVXFloat32Cmp: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ if (instr->InputAt(1)->IsDoubleRegister()) {
+ __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ break;
+ }
+ case kAVXFloat32Add:
+ ASSEMBLE_AVX_BINOP(vaddss);
+ break;
+ case kAVXFloat32Sub:
+ ASSEMBLE_AVX_BINOP(vsubss);
+ break;
+ case kAVXFloat32Mul:
+ ASSEMBLE_AVX_BINOP(vmulss);
+ break;
+ case kAVXFloat32Div:
+ ASSEMBLE_AVX_BINOP(vdivss);
+      // Don't delete this mov. It may improve performance on some CPUs when
+      // there is a (v)mulss depending on the result.
+ __ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ break;
+ case kAVXFloat32Max:
+ ASSEMBLE_AVX_BINOP(vmaxss);
+ break;
+ case kAVXFloat32Min:
+ ASSEMBLE_AVX_BINOP(vminss);
+ break;
+ case kAVXFloat64Cmp: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ if (instr->InputAt(1)->IsDoubleRegister()) {
+ __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ break;
+ }
case kAVXFloat64Add:
- ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
+ ASSEMBLE_AVX_BINOP(vaddsd);
break;
case kAVXFloat64Sub:
- ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
+ ASSEMBLE_AVX_BINOP(vsubsd);
break;
case kAVXFloat64Mul:
- ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
+ ASSEMBLE_AVX_BINOP(vmulsd);
break;
case kAVXFloat64Div:
- ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
+ ASSEMBLE_AVX_BINOP(vdivsd);
+      // Don't delete this mov. It may improve performance on some CPUs when
+      // there is a (v)mulsd depending on the result.
+ __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kX64Movsxbl:
- if (instr->addressing_mode() != kMode_None) {
- __ movsxbl(i.OutputRegister(), i.MemoryOperand());
- } else if (instr->InputAt(0)->IsRegister()) {
- __ movsxbl(i.OutputRegister(), i.InputRegister(0));
+ case kAVXFloat64Max:
+ ASSEMBLE_AVX_BINOP(vmaxsd);
+ break;
+ case kAVXFloat64Min:
+ ASSEMBLE_AVX_BINOP(vminsd);
+ break;
+ case kAVXFloat32Abs: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputDoubleRegister(0));
} else {
- __ movsxbl(i.OutputRegister(), i.InputOperand(0));
+ __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
}
+ break;
+ }
+ case kAVXFloat32Neg: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputDoubleRegister(0));
+ } else {
+ __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ }
+ break;
+ }
+ case kAVXFloat64Abs: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputDoubleRegister(0));
+ } else {
+ __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ }
+ break;
+ }
+ case kAVXFloat64Neg: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputDoubleRegister(0));
+ } else {
+ __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ }
+ break;
+ }
+ case kX64Movsxbl:
+ ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxbl:
- __ movzxbl(i.OutputRegister(), i.MemoryOperand());
+ ASSEMBLE_MOVX(movzxbl);
+ __ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movb: {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ movb(operand, Immediate(i.InputInt8(index)));
@@ -819,21 +1407,15 @@
break;
}
case kX64Movsxwl:
- if (instr->addressing_mode() != kMode_None) {
- __ movsxwl(i.OutputRegister(), i.MemoryOperand());
- } else if (instr->InputAt(0)->IsRegister()) {
- __ movsxwl(i.OutputRegister(), i.InputRegister(0));
- } else {
- __ movsxwl(i.OutputRegister(), i.InputOperand(0));
- }
+ ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxwl:
- __ movzxwl(i.OutputRegister(), i.MemoryOperand());
+ ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movw: {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ movw(operand, Immediate(i.InputInt16(index)));
@@ -855,7 +1437,7 @@
}
__ AssertZeroExtended(i.OutputRegister());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ movl(operand, i.InputImmediate(index));
@@ -864,19 +1446,14 @@
}
}
break;
- case kX64Movsxlq: {
- if (instr->InputAt(0)->IsRegister()) {
- __ movsxlq(i.OutputRegister(), i.InputRegister(0));
- } else {
- __ movsxlq(i.OutputRegister(), i.InputOperand(0));
- }
+ case kX64Movsxlq:
+ ASSEMBLE_MOVX(movsxlq);
break;
- }
case kX64Movq:
if (instr->HasOutput()) {
__ movq(i.OutputRegister(), i.MemoryOperand());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ movq(operand, i.InputImmediate(index));
@@ -889,18 +1466,46 @@
if (instr->HasOutput()) {
__ movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ movss(operand, i.InputDoubleRegister(index));
}
break;
case kX64Movsd:
if (instr->HasOutput()) {
- __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movsd(operand, i.InputDoubleRegister(index));
+ __ Movsd(operand, i.InputDoubleRegister(index));
+ }
+ break;
+ case kX64BitcastFI:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ movl(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kX64BitcastDL:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ movq(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ Movq(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kX64BitcastIF:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64BitcastLD:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Movq(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ Movsd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kX64Lea32: {
@@ -949,24 +1554,29 @@
case kX64Push:
if (HasImmediateInput(instr, 0)) {
__ pushq(i.InputImmediate(0));
+ frame_access_state()->IncreaseSPDelta(1);
} else {
if (instr->InputAt(0)->IsRegister()) {
__ pushq(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ // TODO(titzer): use another machine instruction?
+ __ subq(rsp, Immediate(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
} else {
__ pushq(i.InputOperand(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
}
break;
- case kX64StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ movsxlq(index, index);
- __ movq(Operand(object, index, times_1, 0), value);
- __ leaq(index, Operand(object, index, times_1, 0));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- __ RecordWrite(object, index, value, mode);
+ case kX64Poke: {
+ int const slot = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
+ } else {
+ __ movq(Operand(rsp, slot * kPointerSize), i.InputRegister(0));
+ }
break;
}
case kCheckedLoadInt8:
@@ -984,11 +1594,14 @@
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
break;
+ case kCheckedLoadWord64:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
+ break;
case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(Movss);
break;
case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(movb);
@@ -999,14 +1612,20 @@
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(movl);
break;
+ case kCheckedStoreWord64:
+ ASSEMBLE_CHECKED_STORE_INTEGER(movq);
+ break;
case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+ ASSEMBLE_CHECKED_STORE_FLOAT(Movss);
break;
case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+ ASSEMBLE_CHECKED_STORE_FLOAT(Movsd);
+ break;
+ case kX64StackCheck:
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
break;
}
-}
+} // NOLINT(readability/fn_size)
// Assembles branches after this instruction.
@@ -1041,27 +1660,15 @@
case kSignedGreaterThan:
__ j(greater, tlabel);
break;
- case kUnorderedLessThan:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kUnsignedLessThan:
__ j(below, tlabel);
break;
- case kUnorderedGreaterThanOrEqual:
- __ j(parity_even, tlabel);
- // Fall through.
case kUnsignedGreaterThanOrEqual:
__ j(above_equal, tlabel);
break;
- case kUnorderedLessThanOrEqual:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kUnsignedLessThanOrEqual:
__ j(below_equal, tlabel);
break;
- case kUnorderedGreaterThan:
- __ j(parity_even, tlabel);
- // Fall through.
case kUnsignedGreaterThan:
__ j(above, tlabel);
break;
@@ -1071,12 +1678,15 @@
case kNotOverflow:
__ j(no_overflow, tlabel);
break;
+ default:
+ UNREACHABLE();
+ break;
}
if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
@@ -1090,8 +1700,8 @@
// Materialize a full 64-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label check;
- DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
- Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
+ DCHECK_NE(0u, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = no_condition;
switch (condition) {
case kUnorderedEqual:
@@ -1122,35 +1732,15 @@
case kSignedGreaterThan:
cc = greater;
break;
- case kUnorderedLessThan:
- __ j(parity_odd, &check, Label::kNear);
- __ movl(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedLessThan:
cc = below;
break;
- case kUnorderedGreaterThanOrEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ movl(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedGreaterThanOrEqual:
cc = above_equal;
break;
- case kUnorderedLessThanOrEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ movl(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedLessThanOrEqual:
cc = below_equal;
break;
- case kUnorderedGreaterThan:
- __ j(parity_odd, &check, Label::kNear);
- __ movl(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
case kUnsignedGreaterThan:
cc = above;
break;
@@ -1160,6 +1750,9 @@
case kNotOverflow:
cc = no_overflow;
break;
+ default:
+ UNREACHABLE();
+ break;
}
__ bind(&check);
__ setcc(cc, reg);
@@ -1168,84 +1761,166 @@
}
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
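+// Lowers a lookup switch to a linear sequence of compares and branches, one
+// per case value, falling back to a jump to the default block.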
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ X64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ cmpl(input, Immediate(i.InputInt32(index + 0)));
+ __ j(equal, GetLabel(i.InputRpo(index + 1)));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
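+// Lowers a table switch to a bounds check against the case count (branching
+// to the default block when out of range) followed by an indirect jump
+// through an embedded table of case labels.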
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ X64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
+ Label** cases = zone()->NewArray<Label*>(case_count);
+ for (int32_t index = 0; index < case_count; ++index) {
+ cases[index] = GetLabel(i.InputRpo(index + 2));
+ }
+ Label* const table = AddJumpTable(cases, case_count);
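+ // Bounds-check the input against the case count (falling back to the
+ // default target) and dispatch through an indirect jump into the table
+ // of 64-bit label addresses emitted by AssembleJumpTable below.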
+ __ cmpl(input, Immediate(case_count));
+ __ j(above_equal, GetLabel(i.InputRpo(1)));
+ __ leaq(kScratchRegister, Operand(table));
+ __ jmp(Operand(kScratchRegister, input, times_8, 0));
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
+namespace {
+
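+// Each callee-saved XMM register is spilled to a 16-byte (quad word) slot.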
+static const int kQuadWordSize = 16;
+
+} // namespace
+
+
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ pushq(rbp);
__ movq(rbp, rsp);
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- int register_save_area_size = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- __ pushq(Register::from_code(i));
- register_save_area_size += kPointerSize;
- }
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
- }
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
}
- if (stack_slots > 0) {
- __ subq(rsp, Immediate(stack_slots * kPointerSize));
+ frame_access_state()->SetFrameAccessToDefault();
+
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ stack_shrink_slots -=
+ static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
+ }
+
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+ }
+ if (stack_shrink_slots > 0) {
+ __ subq(rsp, Immediate(stack_shrink_slots * kPointerSize));
+ }
+
+ if (saves_fp != 0) { // Save callee-saved XMM registers.
+ const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
+ const int stack_size = saves_fp_count * kQuadWordSize;
+ // Adjust the stack pointer.
+ __ subp(rsp, Immediate(stack_size));
+ // Store the registers on the stack.
+ int slot_idx = 0;
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ if (!((1 << i) & saves_fp)) continue;
+ __ movdqu(Operand(rsp, kQuadWordSize * slot_idx),
+ XMMRegister::from_code(i));
+ slot_idx++;
+ }
+ frame()->AllocateSavedCalleeRegisterSlots(saves_fp_count *
+ (kQuadWordSize / kPointerSize));
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ __ pushq(Register::from_code(i));
+ frame()->AllocateSavedCalleeRegisterSlots(1);
+ }
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
- if (stack_slots > 0) {
- __ addq(rsp, Immediate(stack_slots * kPointerSize));
- }
- const RegList saves = descriptor->CalleeSavedRegisters();
- // Restore registers.
- if (saves != 0) {
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (!((1 << i) & saves)) continue;
- __ popq(Register::from_code(i));
- }
- }
- __ popq(rbp); // Pop caller's frame pointer.
- __ ret(0);
- } else {
- // No saved registers.
- __ movq(rsp, rbp); // Move stack pointer back to frame pointer.
- __ popq(rbp); // Pop caller's frame pointer.
- __ ret(0);
+
+ // Restore registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ if (!((1 << i) & saves)) continue;
+ __ popq(Register::from_code(i));
}
- } else {
+ }
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
+ const int stack_size = saves_fp_count * kQuadWordSize;
+ // Load the registers from the stack.
+ int slot_idx = 0;
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+ if (!((1 << i) & saves_fp)) continue;
+ __ movdqu(XMMRegister::from_code(i),
+ Operand(rsp, kQuadWordSize * slot_idx));
+ slot_idx++;
+ }
+ // Adjust the stack pointer.
+ __ addp(rsp, Immediate(stack_size));
+ }
+
+ if (descriptor->IsCFunctionCall()) {
__ movq(rsp, rbp); // Move stack pointer back to frame pointer.
__ popq(rbp); // Pop caller's frame pointer.
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
- __ ret(pop_count * kPointerSize);
+ } else if (frame()->needs_frame()) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ __ movq(rsp, rbp); // Move stack pointer back to frame pointer.
+ __ popq(rbp); // Pop caller's frame pointer.
+ }
}
+ size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ // Might need rcx for scratch if pop_size is too big.
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rcx.bit());
+ __ Ret(static_cast<int>(pop_size), rcx);
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- X64OperandConverter g(this, NULL);
+ X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1295,9 +1970,19 @@
case Constant::kExternalReference:
__ Move(dst, src.ToExternalReference());
break;
- case Constant::kHeapObject:
- __ Move(dst, src.ToHeapObject());
+ case Constant::kHeapObject: {
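+ // Prefer rematerializing the constant from the current frame or the
+ // root list over embedding a pointer to the heap object in the code.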
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ int offset;
+ if (IsMaterializableFromFrame(src_object, &offset)) {
+ __ movp(dst, Operand(rbp, offset));
+ } else if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ Move(dst, src_object);
+ }
break;
+ }
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): load of labels on x64.
break;
@@ -1330,23 +2015,23 @@
XMMRegister src = g.ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movsd(dst, src);
+ __ Movapd(dst, src);
} else {
DCHECK(destination->IsDoubleStackSlot());
Operand dst = g.ToOperand(destination);
- __ movsd(dst, src);
+ __ Movsd(dst, src);
}
} else if (source->IsDoubleStackSlot()) {
DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
Operand src = g.ToOperand(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movsd(dst, src);
+ __ Movsd(dst, src);
} else {
// We rely on having xmm0 available as a fixed scratch register.
Operand dst = g.ToOperand(destination);
- __ movsd(xmm0, src);
- __ movsd(dst, xmm0);
+ __ Movsd(xmm0, src);
+ __ Movsd(dst, xmm0);
}
} else {
UNREACHABLE();
@@ -1356,16 +2041,25 @@
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- X64OperandConverter g(this, NULL);
+ X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Register-register.
- __ xchgq(g.ToRegister(source), g.ToRegister(destination));
+ Register src = g.ToRegister(source);
+ Register dst = g.ToRegister(destination);
+ __ movq(kScratchRegister, src);
+ __ movq(src, dst);
+ __ movq(dst, kScratchRegister);
} else if (source->IsRegister() && destination->IsStackSlot()) {
Register src = g.ToRegister(source);
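+ // Swap via push/pop rather than xchgq, which implies a LOCK prefix
+ // when given a memory operand. The SP delta is tracked so ToOperand
+ // still resolves the slot correctly while rsp is adjusted.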
+ __ pushq(src);
+ frame_access_state()->IncreaseSPDelta(1);
Operand dst = g.ToOperand(destination);
- __ xchgq(src, dst);
+ __ movq(src, dst);
+ frame_access_state()->IncreaseSPDelta(-1);
+ dst = g.ToOperand(destination);
+ __ popq(dst);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
(source->IsDoubleStackSlot() &&
destination->IsDoubleStackSlot())) {
@@ -1374,24 +2068,29 @@
Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
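+ // Memory-memory swap: tmp <- dst, push src (tracking the SP delta so
+ // operands keep resolving), src <- tmp, then pop the old src value
+ // into dst.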
__ movq(tmp, dst);
- __ xchgq(tmp, src);
- __ movq(dst, tmp);
+ __ pushq(src);
+ frame_access_state()->IncreaseSPDelta(1);
+ src = g.ToOperand(source);
+ __ movq(src, tmp);
+ frame_access_state()->IncreaseSPDelta(-1);
+ dst = g.ToOperand(destination);
+ __ popq(dst);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// XMM register-register swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movsd(xmm0, src);
- __ movsd(src, dst);
- __ movsd(dst, xmm0);
+ __ Movapd(xmm0, src);
+ __ Movapd(src, dst);
+ __ Movapd(dst, xmm0);
} else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
// XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = g.ToDoubleRegister(source);
Operand dst = g.ToOperand(destination);
- __ movsd(xmm0, src);
- __ movsd(src, dst);
- __ movsd(dst, xmm0);
+ __ Movsd(xmm0, src);
+ __ Movsd(src, dst);
+ __ Movsd(dst, xmm0);
} else {
// No other combinations are possible.
UNREACHABLE();
@@ -1399,25 +2098,33 @@
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
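+ // Each entry is an absolute 64-bit label address, matching the
+ // times_8-scaled indirect jump emitted in AssembleArchTableSwitch.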
+ for (size_t index = 0; index < target_count; ++index) {
+ __ dq(targets[index]);
+ }
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
void CodeGenerator::EnsureSpaceForLazyDeopt() {
- int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
- }
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
}
- MarkLazyDeoptSite();
+
+ int space_needed = Deoptimizer::patch_size();
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
}
#undef __
-} // namespace internal
} // namespace compiler
+} // namespace internal
} // namespace v8
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index 77e3e52..8e8e765 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -46,26 +46,72 @@
V(X64Sar32) \
V(X64Ror) \
V(X64Ror32) \
+ V(X64Lzcnt) \
+ V(X64Lzcnt32) \
+ V(X64Tzcnt) \
+ V(X64Tzcnt32) \
+ V(X64Popcnt) \
+ V(X64Popcnt32) \
+ V(SSEFloat32Cmp) \
+ V(SSEFloat32Add) \
+ V(SSEFloat32Sub) \
+ V(SSEFloat32Mul) \
+ V(SSEFloat32Div) \
+ V(SSEFloat32Abs) \
+ V(SSEFloat32Neg) \
+ V(SSEFloat32Sqrt) \
+ V(SSEFloat32Max) \
+ V(SSEFloat32Min) \
+ V(SSEFloat32ToFloat64) \
+ V(SSEFloat32Round) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
+ V(SSEFloat64Abs) \
+ V(SSEFloat64Neg) \
V(SSEFloat64Sqrt) \
- V(SSEFloat64Floor) \
- V(SSEFloat64Ceil) \
- V(SSEFloat64RoundTruncate) \
- V(SSECvtss2sd) \
- V(SSECvtsd2ss) \
+ V(SSEFloat64Round) \
+ V(SSEFloat64Max) \
+ V(SSEFloat64Min) \
+ V(SSEFloat64ToFloat32) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
+ V(SSEFloat32ToInt64) \
+ V(SSEFloat64ToInt64) \
+ V(SSEFloat32ToUint64) \
+ V(SSEFloat64ToUint64) \
V(SSEInt32ToFloat64) \
+ V(SSEInt64ToFloat32) \
+ V(SSEInt64ToFloat64) \
+ V(SSEUint64ToFloat32) \
+ V(SSEUint64ToFloat64) \
V(SSEUint32ToFloat64) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
+ V(AVXFloat32Cmp) \
+ V(AVXFloat32Add) \
+ V(AVXFloat32Sub) \
+ V(AVXFloat32Mul) \
+ V(AVXFloat32Div) \
+ V(AVXFloat32Max) \
+ V(AVXFloat32Min) \
+ V(AVXFloat64Cmp) \
V(AVXFloat64Add) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
V(AVXFloat64Div) \
+ V(AVXFloat64Max) \
+ V(AVXFloat64Min) \
+ V(AVXFloat64Abs) \
+ V(AVXFloat64Neg) \
+ V(AVXFloat32Abs) \
+ V(AVXFloat32Neg) \
V(X64Movsxbl) \
V(X64Movzxbl) \
V(X64Movb) \
@@ -77,12 +123,17 @@
V(X64Movq) \
V(X64Movsd) \
V(X64Movss) \
+ V(X64BitcastFI) \
+ V(X64BitcastDL) \
+ V(X64BitcastIF) \
+ V(X64BitcastLD) \
V(X64Lea32) \
V(X64Lea) \
V(X64Dec32) \
V(X64Inc32) \
V(X64Push) \
- V(X64StoreWriteBarrier)
+ V(X64Poke) \
+ V(X64StackCheck)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
new file mode 100644
index 0000000..f8537c8
--- /dev/null
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -0,0 +1,182 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kX64Add:
+ case kX64Add32:
+ case kX64And:
+ case kX64And32:
+ case kX64Cmp:
+ case kX64Cmp32:
+ case kX64Test:
+ case kX64Test32:
+ case kX64Or:
+ case kX64Or32:
+ case kX64Xor:
+ case kX64Xor32:
+ case kX64Sub:
+ case kX64Sub32:
+ case kX64Imul:
+ case kX64Imul32:
+ case kX64ImulHigh32:
+ case kX64UmulHigh32:
+ case kX64Idiv:
+ case kX64Idiv32:
+ case kX64Udiv:
+ case kX64Udiv32:
+ case kX64Not:
+ case kX64Not32:
+ case kX64Neg:
+ case kX64Neg32:
+ case kX64Shl:
+ case kX64Shl32:
+ case kX64Shr:
+ case kX64Shr32:
+ case kX64Sar:
+ case kX64Sar32:
+ case kX64Ror:
+ case kX64Ror32:
+ case kX64Lzcnt:
+ case kX64Lzcnt32:
+ case kX64Tzcnt:
+ case kX64Tzcnt32:
+ case kX64Popcnt:
+ case kX64Popcnt32:
+ case kSSEFloat32Cmp:
+ case kSSEFloat32Add:
+ case kSSEFloat32Sub:
+ case kSSEFloat32Mul:
+ case kSSEFloat32Div:
+ case kSSEFloat32Abs:
+ case kSSEFloat32Neg:
+ case kSSEFloat32Sqrt:
+ case kSSEFloat32Round:
+ case kSSEFloat32Max:
+ case kSSEFloat32Min:
+ case kSSEFloat32ToFloat64:
+ case kSSEFloat64Cmp:
+ case kSSEFloat64Add:
+ case kSSEFloat64Sub:
+ case kSSEFloat64Mul:
+ case kSSEFloat64Div:
+ case kSSEFloat64Mod:
+ case kSSEFloat64Abs:
+ case kSSEFloat64Neg:
+ case kSSEFloat64Sqrt:
+ case kSSEFloat64Round:
+ case kSSEFloat64Max:
+ case kSSEFloat64Min:
+ case kSSEFloat64ToFloat32:
+ case kSSEFloat64ToInt32:
+ case kSSEFloat64ToUint32:
+ case kSSEFloat64ToInt64:
+ case kSSEFloat32ToInt64:
+ case kSSEFloat64ToUint64:
+ case kSSEFloat32ToUint64:
+ case kSSEInt32ToFloat64:
+ case kSSEInt64ToFloat32:
+ case kSSEInt64ToFloat64:
+ case kSSEUint64ToFloat32:
+ case kSSEUint64ToFloat64:
+ case kSSEUint32ToFloat64:
+ case kSSEFloat64ExtractLowWord32:
+ case kSSEFloat64ExtractHighWord32:
+ case kSSEFloat64InsertLowWord32:
+ case kSSEFloat64InsertHighWord32:
+ case kSSEFloat64LoadLowWord32:
+ case kAVXFloat32Cmp:
+ case kAVXFloat32Add:
+ case kAVXFloat32Sub:
+ case kAVXFloat32Mul:
+ case kAVXFloat32Div:
+ case kAVXFloat32Max:
+ case kAVXFloat32Min:
+ case kAVXFloat64Cmp:
+ case kAVXFloat64Add:
+ case kAVXFloat64Sub:
+ case kAVXFloat64Mul:
+ case kAVXFloat64Div:
+ case kAVXFloat64Max:
+ case kAVXFloat64Min:
+ case kAVXFloat64Abs:
+ case kAVXFloat64Neg:
+ case kAVXFloat32Abs:
+ case kAVXFloat32Neg:
+ case kX64BitcastFI:
+ case kX64BitcastDL:
+ case kX64BitcastIF:
+ case kX64BitcastLD:
+ case kX64Lea32:
+ case kX64Lea:
+ case kX64Dec32:
+ case kX64Inc32:
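+ // With a memory addressing mode these ALU operations touch memory, so
+ // they are conservatively treated as both a load and a side effect.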
+ return (instr->addressing_mode() == kMode_None)
+ ? kNoOpcodeFlags
+ : kIsLoadOperation | kHasSideEffect;
+
+ case kX64Movsxbl:
+ case kX64Movzxbl:
+ case kX64Movsxwl:
+ case kX64Movzxwl:
+ case kX64Movsxlq:
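+ // A register input makes these pure sign-/zero-extensions; otherwise
+ // they load from memory.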
+ DCHECK(instr->InputCount() >= 1);
+ return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
+ : kIsLoadOperation;
+
+ case kX64Movb:
+ case kX64Movw:
+ return kHasSideEffect;
+
+ case kX64Movl:
+ if (instr->HasOutput()) {
+ DCHECK(instr->InputCount() >= 1);
+ return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
+ : kIsLoadOperation;
+ } else {
+ return kHasSideEffect;
+ }
+
+ case kX64Movq:
+ case kX64Movsd:
+ case kX64Movss:
+ return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
+
+ case kX64StackCheck:
+ return kIsLoadOperation;
+
+ case kX64Push:
+ case kX64Poke:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index aba480d..c47a42e 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -2,24 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
+
+#include "src/base/adapters.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
namespace compiler {
// Adds X64-specific methods for generating operands.
-class X64OperandGenerator FINAL : public OperandGenerator {
+class X64OperandGenerator final : public OperandGenerator {
public:
explicit X64OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand* TempRegister(Register reg) {
- return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
- }
-
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
@@ -28,6 +27,10 @@
const int64_t value = OpParameter<int64_t>(node);
return value == static_cast<int64_t>(static_cast<int32_t>(value));
}
+ case IrOpcode::kNumberConstant: {
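+ // Only +0.0 has an all-zero bit pattern, so it is the only number
+ // constant that can be encoded as the immediate 0.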
+ const double value = OpParameter<double>(node);
+ return bit_cast<int64_t>(value) == 0;
+ }
default:
return false;
}
@@ -35,15 +38,15 @@
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
Node* base, Node* displacement,
- InstructionOperand* inputs[],
+ InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
- if (base != NULL) {
+ if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
- if (index != NULL) {
+ if (index != nullptr) {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
- if (displacement != NULL) {
+ if (displacement != nullptr) {
inputs[(*input_count)++] = UseImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
kMode_MR4I, kMode_MR8I};
@@ -54,7 +57,7 @@
mode = kMRn_modes[scale_exponent];
}
} else {
- if (displacement == NULL) {
+ if (displacement == nullptr) {
mode = kMode_MR;
} else {
inputs[(*input_count)++] = UseImmediate(displacement);
@@ -62,10 +65,10 @@
}
}
} else {
- DCHECK(index != NULL);
+ DCHECK_NOT_NULL(index);
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
- if (displacement != NULL) {
+ if (displacement != nullptr) {
inputs[(*input_count)++] = UseImmediate(displacement);
static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
kMode_M4I, kMode_M8I};
@@ -84,11 +87,11 @@
}
AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
- InstructionOperand* inputs[],
+ InstructionOperand inputs[],
size_t* input_count) {
BaseWithIndexAndDisplacement64Matcher m(operand, true);
DCHECK(m.matches());
- if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
+ if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
@@ -105,40 +108,39 @@
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
X64OperandGenerator g(this);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kX64Movss;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kX64Movsd;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kX64Movl;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kX64Movq;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- InstructionOperand* outputs[1];
+ InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand* inputs[3];
+ InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
@@ -153,83 +155,118 @@
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
- Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
- g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
- temps);
- return;
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
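+ // Unique registers keep base and index (and, except for map stores,
+ // the value) from being aliased, since the write-barrier slow path
+ // still needs them after the store itself.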
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kX64Movss;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kX64Movsd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kX64Movb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kX64Movw;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kX64Movl;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kX64Movq;
+ break;
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code =
+ opcode | AddressingModeField::encode(addressing_mode);
+ InstructionOperand value_operand =
+ g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+ inputs[input_count++] = value_operand;
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
+ inputs);
}
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kX64Movss;
- break;
- case kRepFloat64:
- opcode = kX64Movsd;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kX64Movb;
- break;
- case kRepWord16:
- opcode = kX64Movw;
- break;
- case kRepWord32:
- opcode = kX64Movl;
- break;
- case kRepTagged: // Fall through.
- case kRepWord64:
- opcode = kX64Movq;
- break;
- default:
- UNREACHABLE();
- return;
- }
- InstructionOperand* inputs[4];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- InstructionOperand* value_operand =
- g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
- inputs[input_count++] = value_operand;
- Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
X64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kWord64:
+ opcode = kCheckedLoadWord64;
+ break;
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -245,7 +282,7 @@
return;
}
}
- InstructionOperand* length_operand =
+ InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
g.UseRegister(offset), g.TempImmediate(0), length_operand);
@@ -253,34 +290,39 @@
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
X64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kWord64:
+ opcode = kCheckedStoreWord64;
+ break;
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- InstructionOperand* value_operand =
+ InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
Int32Matcher mlength(length);
@@ -288,16 +330,16 @@
if (mlength.HasValue() && moffset.right().HasValue() &&
moffset.right().Value() >= 0 &&
mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, nullptr, g.UseRegister(buffer),
+ Emit(opcode, g.NoOutput(), g.UseRegister(buffer),
g.UseRegister(moffset.left().node()),
g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
value_operand);
return;
}
}
- InstructionOperand* length_operand =
+ InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- Emit(opcode, nullptr, g.UseRegister(buffer), g.UseRegister(offset),
+ Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
g.TempImmediate(0), length_operand, value_operand);
}
@@ -309,9 +351,9 @@
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
// TODO(turbofan): match complex addressing modes.
@@ -323,7 +365,7 @@
// mov rax, [rbp-0x10]
// add rax, [rbp-0x10]
// jo label
- InstructionOperand* const input = g.UseRegister(left);
+ InstructionOperand const input = g.UseRegister(left);
inputs[input_count++] = input;
inputs[input_count++] = input;
} else if (g.CanBeImmediate(right)) {
@@ -348,14 +390,13 @@
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, static_cast<int>(input_count));
- DCHECK_NE(0, static_cast<int>(output_count));
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -368,7 +409,15 @@
void InstructionSelector::VisitWord32And(Node* node) {
- VisitBinop(this, node, kX64And32);
+ X64OperandGenerator g(this);
+ Uint32BinopMatcher m(node);
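+ // x & 0xff / x & 0xffff zero-extends the low byte/word, so a movzx of
+ // the operand is equivalent and shorter.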
+ if (m.right().Is(0xff)) {
+ Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
+ } else if (m.right().Is(0xffff)) {
+ Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
+ } else {
+ VisitBinop(this, node, kX64And32);
+ }
}
@@ -460,15 +509,15 @@
Node* displacement) {
X64OperandGenerator g(selector);
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
index, scale, base, displacement, inputs, &input_count);
- DCHECK_NE(0, static_cast<int>(input_count));
+ DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
- InstructionOperand* outputs[1];
+ InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(result);
opcode = AddressingModeField::encode(mode) | opcode;
@@ -483,8 +532,8 @@
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
return;
}
VisitWord32Shift(this, node, kX64Shl32);
@@ -549,13 +598,49 @@
}
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Lzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord64Ctz(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Tzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Popcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
X64OperandGenerator g(this);
// Try to match the Add to a leal pattern
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
- (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+ (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
m.displacement());
return;
@@ -571,6 +656,16 @@
}
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kX64Add, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX64Add, &cont);
+}
+
+
void InstructionSelector::VisitInt32Sub(Node* node) {
X64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -601,6 +696,16 @@
}
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kX64Sub, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX64Sub, &cont);
+}
+
+
namespace {
void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
@@ -638,7 +743,7 @@
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
X64OperandGenerator g(selector);
- InstructionOperand* temps[] = {g.TempRegister(rdx)};
+ InstructionOperand temps[] = {g.TempRegister(rdx)};
selector->Emit(
opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
@@ -659,8 +764,8 @@
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
return;
}
VisitMul(this, node, kX64Imul32);
@@ -724,7 +829,7 @@
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
X64OperandGenerator g(this);
- Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ Emit(kSSEFloat32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
@@ -752,6 +857,70 @@
}
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
+}
+
+
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
X64OperandGenerator g(this);
Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -795,9 +964,62 @@
}
+namespace {
+
+void VisitRO(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void VisitRR(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void VisitFloatBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ X64OperandGenerator g(selector);
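+ // AVX provides non-destructive three-operand forms, so the result can
+ // live in its own register; the two-operand SSE encodings overwrite
+ // their first source, hence DefineSameAsFirst.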
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.Use(node->InputAt(1));
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
+ } else {
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
+ }
+}
+
+
+void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
+ ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ X64OperandGenerator g(selector);
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
+ } else {
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
+ }
+}
+
+} // namespace
+
+
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEFloat64ToFloat32);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
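+ // JavaScript truncation needs full ToInt32 semantics (modulo 2^32,
+ // with NaN and infinities mapping to 0) and goes through the generic
+ // arch instruction; round-to-zero maps directly onto cvttsd2si.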
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, node, kArchTruncateDoubleToI);
+ case TruncationMode::kRoundToZero:
+ return VisitRO(this, node, kSSEFloat64ToInt32);
+ }
+ UNREACHABLE();
}
@@ -824,96 +1046,200 @@
}
-void InstructionSelector::VisitFloat64Add(Node* node) {
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
X64OperandGenerator g(this);
- if (IsSupported(AVX)) {
- Emit(kAVXFloat64Add, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
- } else {
- Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ Emit(kSSEInt64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEInt64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ X64OperandGenerator g(this);
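+ // x64 has no unsigned 64-bit to float conversion instruction; the
+ // scratch register is presumably needed by the code generator for the
+ // signed-conversion fixup path.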
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kSSEUint64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kSSEUint64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64BitcastDL, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64BitcastLD, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
+}
+
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ X64OperandGenerator g(this);
+ Float32BinopMatcher m(node);
+ if (m.left().IsMinusZero()) {
+ VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
+ kSSEFloat32Neg);
+ return;
}
+ VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
+}
+
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
+}
+
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitFloatBinop(this, node, kAVXFloat32Div, kSSEFloat32Div);
+}
+
+
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ VisitFloatBinop(this, node, kAVXFloat32Max, kSSEFloat32Max);
+}
+
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ VisitFloatBinop(this, node, kAVXFloat32Min, kSSEFloat32Min);
+}
+
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRO(this, node, kSSEFloat32Sqrt);
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
X64OperandGenerator g(this);
- if (IsSupported(AVX)) {
- Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
- } else {
- Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ Float64BinopMatcher m(node);
+ if (m.left().IsMinusZero()) {
+ if (m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
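+ // -0.0 - RoundDown(-0.0 - x) is -floor(-x), i.e. ceil(x); fold the
+ // whole pattern into a single round-up.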
+ Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
+ g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
+ VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
+ kSSEFloat64Neg);
+ return;
}
+ VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
void InstructionSelector::VisitFloat64Mul(Node* node) {
- X64OperandGenerator g(this);
- if (IsSupported(AVX)) {
- Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
- } else {
- Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
- }
+ VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
}
void InstructionSelector::VisitFloat64Div(Node* node) {
- X64OperandGenerator g(this);
- if (IsSupported(AVX)) {
- Emit(kAVXFloat64Div, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
- } else {
- Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
- }
+ VisitFloatBinop(this, node, kAVXFloat64Div, kSSEFloat64Div);
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
X64OperandGenerator g(this);
- InstructionOperand* temps[] = {g.TempRegister(rax)};
+ InstructionOperand temps[] = {g.TempRegister(rax)};
Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
temps);
}
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ VisitFloatBinop(this, node, kAVXFloat64Max, kSSEFloat64Max);
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ VisitFloatBinop(this, node, kAVXFloat64Min, kSSEFloat64Min);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
+}
+
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- X64OperandGenerator g(this);
- Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ VisitRO(this, node, kSSEFloat64Sqrt);
}
-namespace {
-
-void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
- X64OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-} // namespace
-
-
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64Floor, node);
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
}
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64Ceil, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
+}
+
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}
@@ -922,64 +1248,70 @@
}
-void InstructionSelector::VisitCall(Node* node) {
- X64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = NULL;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Push any stack arguments.
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
- // TODO(titzer): handle pushing double parameters.
- Emit(kX64Push, NULL,
- g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- InstructionOperand** first_output =
- buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
- Instruction* call_instr =
- Emit(opcode, buffer.outputs.size(), first_output,
- buffer.instruction_args.size(), &buffer.instruction_args.front());
- call_instr->MarkAsCall();
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
}
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ X64OperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
+ int slot = static_cast<int>(n);
+ InstructionOperand value = g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
+ : g.UseRegister(input.node());
+ Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
+ }
+ }
+ } else {
+ // Push any stack arguments.
+ for (PushParameter input : base::Reversed(*arguments)) {
+ // TODO(titzer): X64Push cannot handle stack->stack double moves
+ // because there is no way to encode fixed double slots.
+ InstructionOperand value =
+ g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
+ : IsSupported(ATOM) ||
+ sequence()->IsFloat(GetVirtualRegister(input.node()))
+ ? g.UseRegister(input.node())
+ : g.Use(input.node());
+ Emit(kX64Push, g.NoOutput(), value);
+ }
+ }
+}
+
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
+
+
+namespace {
+
// Shared routine for multiple compare operations.
-static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand* left, InstructionOperand* right,
- FlagsContinuation* cont) {
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
X64OperandGenerator g(selector);
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
- selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -988,9 +1320,9 @@
// Shared routine for multiple compare operations.
-static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- Node* left, Node* right, FlagsContinuation* cont,
- bool commutative) {
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ Node* left, Node* right, FlagsContinuation* cont,
+ bool commutative) {
X64OperandGenerator g(selector);
if (commutative && g.CanBeBetterLeftOperand(right)) {
std::swap(left, right);
@@ -1000,8 +1332,8 @@
// Shared routine for multiple word compare operations.
-static void VisitWordCompare(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
X64OperandGenerator g(selector);
Node* const left = node->InputAt(0);
Node* const right = node->InputAt(1);
@@ -1019,22 +1351,65 @@
}
+// Shared routine for 64-bit word comparison operations.
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ X64OperandGenerator g(selector);
+ Int64BinopMatcher m(node);
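+ // Recognize Compare(Load(<js stack limit>), LoadStackPointer) and fold
+ // it into the single kX64StackCheck instruction handled by the code
+ // generator above.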
+ if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
+ LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
+ ExternalReference js_stack_limit =
+ ExternalReference::address_of_stack_limit(selector->isolate());
+ if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
+ // Compare(Load(js_stack_limit), LoadStackPointer)
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ InstructionCode opcode = cont->Encode(kX64StackCheck);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()));
+ }
+ return;
+ }
+ }
+ VisitWordCompare(selector, node, kX64Cmp, cont);
+}
+
+
// Shared routine for comparison with zero.
-static void VisitCompareZero(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
+void VisitCompareZero(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
X64OperandGenerator g(selector);
VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
}
-// Shared routine for multiple float64 compare operations.
-static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- VisitCompare(selector, kSSEFloat64Cmp, node->InputAt(0), node->InputAt(1),
- cont, node->op()->HasProperty(Operator::kCommutative));
+// Shared routine for multiple float32 compare operations (inputs commuted).
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
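+ // The operands are deliberately commuted: LessThan(OrEqual) is mapped
+ // to the unsigned "above" conditions, so an unordered (NaN) comparison
+ // falls out on the false side without explicit parity checks.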
+ Node* const left = node->InputAt(0);
+ Node* const right = node->InputAt(1);
+ InstructionCode const opcode =
+ selector->IsSupported(AVX) ? kAVXFloat32Cmp : kSSEFloat32Cmp;
+ VisitCompare(selector, opcode, right, left, cont, false);
}
+// Shared routine for multiple float64 compare operations (inputs commuted).
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Node* const left = node->InputAt(0);
+ Node* const right = node->InputAt(1);
+ InstructionCode const opcode =
+ selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
+ VisitCompare(selector, opcode, right, left, cont, false);
+}
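+
+// Note: Float64LessThan(a, b) is emitted as a compare of (b, a) with the
+// kUnsignedGreaterThan condition. ucomisd/ucomiss signal an unordered (NaN)
+// result through the parity flag and otherwise set the flags as for an
+// unsigned compare, so the commuted "above" test is false whenever either
+// input is NaN, which matches the required comparison semantics.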
+
+} // namespace
+
+
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
X64OperandGenerator g(this);
@@ -1044,25 +1419,12 @@
FlagsContinuation cont(kNotEqual, tbranch, fbranch);
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (CanCover(user, value)) {
- if (value->opcode() == IrOpcode::kWord32Equal) {
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
- } else if (value->opcode() == IrOpcode::kWord64Equal) {
- Int64BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
+ while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
} else {
break;
}
@@ -1086,39 +1448,69 @@
case IrOpcode::kUint32LessThanOrEqual:
cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWordCompare(this, value, kX64Cmp32, &cont);
- case IrOpcode::kWord64Equal:
+ case IrOpcode::kWord64Equal: {
cont.OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(this, value, kX64Cmp, &cont);
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ // Try to combine the branch with a comparison.
+ Node* const user = m.node();
+ Node* const value = m.left().node();
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kInt64Sub:
+ return VisitWord64Compare(this, value, &cont);
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, value, kX64Test, &cont);
+ default:
+ break;
+ }
+ }
+ return VisitCompareZero(this, value, kX64Cmp, &cont);
+ }
+ return VisitWord64Compare(this, value, &cont);
+ }
case IrOpcode::kInt64LessThan:
cont.OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(this, value, kX64Cmp, &cont);
+ return VisitWord64Compare(this, value, &cont);
case IrOpcode::kInt64LessThanOrEqual:
cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(this, value, kX64Cmp, &cont);
+ return VisitWord64Compare(this, value, &cont);
case IrOpcode::kUint64LessThan:
cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(this, value, kX64Cmp, &cont);
+ return VisitWord64Compare(this, value, &cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, &cont);
+ case IrOpcode::kFloat32Equal:
+ cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat32Compare(this, value, &cont);
+ case IrOpcode::kFloat32LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+ return VisitFloat32Compare(this, value, &cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+ return VisitFloat32Compare(this, value, &cont);
case IrOpcode::kFloat64Equal:
cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kFloat64LessThan:
- cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+            // <Operation>) is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
- Node* node = value->InputAt(0);
- Node* result = node->FindProjection(0);
- if (result == NULL || IsDefined(result)) {
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
@@ -1126,6 +1518,12 @@
case IrOpcode::kInt32SubWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(this, node, kX64Sub32, &cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kX64Add, &cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kX64Sub, &cont);
default:
break;
}
@@ -1135,7 +1533,7 @@
case IrOpcode::kInt32Sub:
return VisitWordCompare(this, value, kX64Cmp32, &cont);
case IrOpcode::kInt64Sub:
- return VisitWordCompare(this, value, kX64Cmp, &cont);
+ return VisitWord64Compare(this, value, &cont);
case IrOpcode::kWord32And:
return VisitWordCompare(this, value, kX64Test32, &cont);
case IrOpcode::kWord64And:
@@ -1150,6 +1548,37 @@
}
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ X64OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 4 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = g.TempRegister();
+ if (sw.min_value) {
+      // The leal automatically zero-extends, so the result is a valid 64-bit index.
+ Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-sw.min_value));
+ } else {
+      // Zero-extend, because we use it as a 64-bit index into the jump table.
+ Emit(kX64Movl, index_operand, value_operand);
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
+}
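+
+// A rough worked example of the heuristic above: for a dense switch with
+// case_count == 8 and value_range == 8,
+//   table:  space 4 + 8  = 12, time 3 => 12 + 3 * 3 = 21
+//   lookup: space 3 + 16 = 19, time 8 => 19 + 3 * 8 = 43
+// so the jump table wins; for a sparse switch, a large value_range inflates
+// the table's space cost and the conditional-jump sequence is chosen.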
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
Node* user = node;
FlagsContinuation cont(kEqual, node);
@@ -1211,43 +1640,29 @@
void InstructionSelector::VisitWord64Equal(Node* const node) {
- Node* user = node;
FlagsContinuation cont(kEqual, node);
- Int64BinopMatcher m(user);
+ Int64BinopMatcher m(node);
if (m.right().Is(0)) {
- Node* value = m.left().node();
-
- // Try to combine with comparisons against 0 by simply inverting the branch.
- while (CanCover(user, value) && value->opcode() == IrOpcode::kWord64Equal) {
- Int64BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
- }
-
- // Try to combine the branch with a comparison.
+ // Try to combine the equality check with a comparison.
+ Node* const user = m.node();
+ Node* const value = m.left().node();
if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWordCompare(this, value, kX64Cmp, &cont);
+ return VisitWord64Compare(this, value, &cont);
case IrOpcode::kWord64And:
return VisitWordCompare(this, value, kX64Test, &cont);
default:
break;
}
}
- return VisitCompareZero(this, value, kX64Cmp, &cont);
}
- VisitWordCompare(this, node, kX64Cmp, &cont);
+ VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
VisitBinop(this, node, kX64Add32, &cont);
}
@@ -1257,7 +1672,7 @@
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kX64Sub32, &cont);
}
@@ -1268,19 +1683,43 @@
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont(kSignedLessThan, node);
- VisitWordCompare(this, node, kX64Cmp, &cont);
+ VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
FlagsContinuation cont(kSignedLessThanOrEqual, node);
- VisitWordCompare(this, node, kX64Cmp, &cont);
+ VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitUint64LessThan(Node* node) {
FlagsContinuation cont(kUnsignedLessThan, node);
- VisitWordCompare(this, node, kX64Cmp, &cont);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedGreaterThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
}
@@ -1291,27 +1730,79 @@
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kUnsignedGreaterThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ X64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Float64Matcher mleft(left);
+ if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
+ Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
+ return;
+ }
+ Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.Use(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ X64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.Use(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- if (CpuFeatures::IsSupported(SSE4_1)) {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
- MachineOperatorBuilder::kFloat64RoundTruncate |
- MachineOperatorBuilder::kWord32ShiftIsSafe;
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ flags |= MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord64Popcnt;
}
- return MachineOperatorBuilder::kNoFlags;
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ flags |= MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
+ }
+ return flags;
}
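+
+// Callers query these flags through the MachineOperatorBuilder; a lowering
+// would typically guard on an optional operator roughly as follows (a
+// sketch only; the OptionalOperator accessors are assumed and not part of
+// this diff):
+//
+//   if (machine()->Float64RoundDown().IsSupported()) {
+//     node = graph()->NewNode(machine()->Float64RoundDown().op(), input);
+//   }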
} // namespace compiler
diff --git a/src/compiler/x64/linkage-x64.cc b/src/compiler/x64/linkage-x64.cc
deleted file mode 100644
index 0b76cc7..0000000
--- a/src/compiler/x64/linkage-x64.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#ifdef _WIN64
-const bool kWin64 = true;
-#else
-const bool kWin64 = false;
-#endif
-
-struct X64LinkageHelperTraits {
- static Register ReturnValueReg() { return rax; }
- static Register ReturnValue2Reg() { return rdx; }
- static Register JSCallFunctionReg() { return rdi; }
- static Register ContextReg() { return rsi; }
- static Register RuntimeCallFunctionReg() { return rbx; }
- static Register RuntimeCallArgCountReg() { return rax; }
- static RegList CCalleeSaveRegisters() {
- if (kWin64) {
- return rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() |
- r14.bit() | r15.bit();
- } else {
- return rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit();
- }
- }
- static Register CRegisterParameter(int i) {
- if (kWin64) {
- static Register register_parameters[] = {rcx, rdx, r8, r9};
- return register_parameters[i];
- } else {
- static Register register_parameters[] = {rdi, rsi, rdx, rcx, r8, r9};
- return register_parameters[i];
- }
- }
- static int CRegisterParametersLength() { return kWin64 ? 4 : 6; }
-};
-
-typedef LinkageHelper<X64LinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/src/compiler/x87/OWNERS b/src/compiler/x87/OWNERS
new file mode 100644
index 0000000..61245ae
--- /dev/null
+++ b/src/compiler/x87/OWNERS
@@ -0,0 +1,2 @@
+weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
new file mode 100644
index 0000000..a7b7246
--- /dev/null
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -0,0 +1,2138 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/ast/scopes.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
+#include "src/x87/assembler-x87.h"
+#include "src/x87/frames-x87.h"
+#include "src/x87/macro-assembler-x87.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// Adds X87-specific methods for decoding operands.
+class X87OperandConverter : public InstructionOperandConverter {
+ public:
+ X87OperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ Operand InputOperand(size_t index, int extra = 0) {
+ return ToOperand(instr_->InputAt(index), extra);
+ }
+
+ Immediate InputImmediate(size_t index) {
+ return ToImmediate(instr_->InputAt(index));
+ }
+
+ Operand OutputOperand() { return ToOperand(instr_->Output()); }
+
+ Operand ToOperand(InstructionOperand* op, int extra = 0) {
+ if (op->IsRegister()) {
+ DCHECK(extra == 0);
+ return Operand(ToRegister(op));
+ }
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
+ return Operand(offset.from_stack_pointer() ? esp : ebp,
+ offset.offset() + extra);
+ }
+
+ Operand ToMaterializableOperand(int materializable_offset) {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ Frame::FPOffsetToSlot(materializable_offset));
+ return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+ }
+
+ Operand HighOperand(InstructionOperand* op) {
+ DCHECK(op->IsDoubleStackSlot());
+ return ToOperand(op, kPointerSize);
+ }
+
+ Immediate ToImmediate(InstructionOperand* operand) {
+ Constant constant = ToConstant(operand);
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Immediate(constant.ToInt32());
+ case Constant::kFloat32:
+ return Immediate(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ case Constant::kFloat64:
+ return Immediate(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kExternalReference:
+ return Immediate(constant.ToExternalReference());
+ case Constant::kHeapObject:
+ return Immediate(constant.ToHeapObject());
+ case Constant::kInt64:
+ break;
+ case Constant::kRpoNumber:
+ return Immediate::CodeRelativeOffset(ToLabel(operand));
+ }
+ UNREACHABLE();
+ return Immediate(-1);
+ }
+
+ static size_t NextOffset(size_t* offset) {
+ size_t i = *offset;
+ (*offset)++;
+ return i;
+ }
+
+ static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
+ STATIC_ASSERT(0 == static_cast<int>(times_1));
+ STATIC_ASSERT(1 == static_cast<int>(times_2));
+ STATIC_ASSERT(2 == static_cast<int>(times_4));
+ STATIC_ASSERT(3 == static_cast<int>(times_8));
+ int scale = static_cast<int>(mode - one);
+ DCHECK(scale >= 0 && scale < 4);
+ return static_cast<ScaleFactor>(scale);
+ }
+
+ Operand MemoryOperand(size_t* offset) {
+ AddressingMode mode = AddressingModeField::decode(instr_->opcode());
+ switch (mode) {
+ case kMode_MR: {
+ Register base = InputRegister(NextOffset(offset));
+ int32_t disp = 0;
+ return Operand(base, disp);
+ }
+ case kMode_MRI: {
+ Register base = InputRegister(NextOffset(offset));
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(base, disp);
+ }
+ case kMode_MR1:
+ case kMode_MR2:
+ case kMode_MR4:
+ case kMode_MR8: {
+ Register base = InputRegister(NextOffset(offset));
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_MR1, mode);
+ int32_t disp = 0;
+ return Operand(base, index, scale, disp);
+ }
+ case kMode_MR1I:
+ case kMode_MR2I:
+ case kMode_MR4I:
+ case kMode_MR8I: {
+ Register base = InputRegister(NextOffset(offset));
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(base, index, scale, disp);
+ }
+ case kMode_M1:
+ case kMode_M2:
+ case kMode_M4:
+ case kMode_M8: {
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_M1, mode);
+ int32_t disp = 0;
+ return Operand(index, scale, disp);
+ }
+ case kMode_M1I:
+ case kMode_M2I:
+ case kMode_M4I:
+ case kMode_M8I: {
+ Register index = InputRegister(NextOffset(offset));
+ ScaleFactor scale = ScaleFor(kMode_M1I, mode);
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(index, scale, disp);
+ }
+ case kMode_MI: {
+ int32_t disp = InputInt32(NextOffset(offset));
+ return Operand(Immediate(disp));
+ }
+ case kMode_None:
+ UNREACHABLE();
+ return Operand(no_reg, 0);
+ }
+ UNREACHABLE();
+ return Operand(no_reg, 0);
+ }
+
+ Operand MemoryOperand(size_t first_input = 0) {
+ return MemoryOperand(&first_input);
+ }
+};
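+
+// For example, an instruction encoded with kMode_MR4I consumes three inputs
+// starting at *offset (the base register, the index register, and an int32
+// displacement) and decodes to Operand(base, index, times_4, disp).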
+
+
+namespace {
+
+bool HasImmediateInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsImmediate();
+}
+
+
+class OutOfLineLoadInteger final : public OutOfLineCode {
+ public:
+ OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final { __ xor_(result_, result_); }
+
+ private:
+ Register const result_;
+};
+
+
+class OutOfLineLoadFloat final : public OutOfLineCode {
+ public:
+ OutOfLineLoadFloat(CodeGenerator* gen, X87Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final {
+ DCHECK(result_.code() == 0);
+ USE(result_);
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ push(Immediate(0xffffffff));
+ __ push(Immediate(0x7fffffff));
+ __ fld_d(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ }
+
+ private:
+ X87Register const result_;
+};
+
+
+class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
+ public:
+ OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
+ X87Register input)
+ : OutOfLineCode(gen), result_(result), input_(input) {}
+
+ void Generate() final {
+ UNIMPLEMENTED();
+ USE(result_);
+ USE(input_);
+ }
+
+ private:
+ Register const result_;
+ X87Register const input_;
+};
+
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ operand_(operand),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ lea(scratch1_, operand_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Operand const operand_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
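+
+// OutOfLineRecordWrite is the slow path of the write barrier: the inline
+// fast path (see kArchStoreWithWriteBarrier below) only jumps here when the
+// written-to page is marked as interesting; this code then filters out Smi
+// values and values on uninteresting pages before calling RecordWriteStub
+// with the field address in scratch1.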
+
+} // namespace
+
+
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
+ do { \
+ auto result = i.OutputDoubleRegister(); \
+ auto offset = i.InputRegister(0); \
+ DCHECK(result.code() == 0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ fstp(0); \
+ __ asm_instr(i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ auto result = i.OutputRegister(); \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ Label done; \
+ DCHECK(i.InputDoubleRegister(2).code() == 0); \
+ __ j(above_equal, &done, Label::kNear); \
+ __ asm_instr(i.MemoryOperand(3)); \
+ __ bind(&done); \
+ } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ Label done; \
+ __ j(above_equal, &done, Label::kNear); \
+ if (instr->InputAt(2)->IsRegister()) { \
+ __ asm_instr(i.MemoryOperand(3), i.InputRegister(2)); \
+ } else { \
+ __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
+ } \
+ __ bind(&done); \
+ } while (false)
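+
+// The checked-access macros above implement the bounds check for typed
+// array accesses: the untagged offset register is compared against the
+// buffer length (register or immediate), and an out-of-bounds access either
+// jumps to out-of-line code that materializes the default value (0 for
+// integers, NaN for floats) or, for stores, skips the write entirely. In C
+// terms the emitted load is roughly:
+//
+//   value = (uint32_t)offset < (uint32_t)length ? buffer[offset] : def;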
+
+
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ add(esp, Immediate(sp_slot_delta * kPointerSize));
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ mov(ebp, MemOperand(ebp, 0));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
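+
+// The two helpers above adjust the stack for tail calls: a positive slot
+// delta means the caller's frame holds more stack parameters than the
+// callee needs, so esp is simply bumped, while a negative delta grows the
+// stack first and records the extra slots in the frame access state.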
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ X87OperandConverter i(this, instr);
+
+ switch (ArchOpcodeField::decode(instr->opcode())) {
+ case kArchCallCodeObject: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ EnsureSpaceForLazyDeopt();
+ if (HasImmediateInput(instr, 0)) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(reg);
+ }
+ RecordCallPosition(instr);
+ bool double_result =
+ instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ if (double_result) {
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ }
+ __ fninit();
+ if (double_result) {
+ __ fld_d(Operand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ } else {
+ __ fld1();
+ }
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallCodeObject: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ if (HasImmediateInput(instr, 0)) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ jmp(code, RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(reg);
+ }
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
+ __ Assert(equal, kWrongFunctionContext);
+ }
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ RecordCallPosition(instr);
+ bool double_result =
+ instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ if (double_result) {
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ }
+ __ fninit();
+ if (double_result) {
+ __ fld_d(Operand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ } else {
+ __ fld1();
+ }
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
+ __ Assert(equal, kWrongFunctionContext);
+ }
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+      // Lazy bailout entry: the FPU state needs to be re-initialized.
+ __ fninit();
+ __ fld1();
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
+ case kArchCallCFunction: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ bool double_result =
+ instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ if (double_result) {
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ }
+ __ fninit();
+ if (double_result) {
+ __ fld_d(Operand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ } else {
+ __ fld1();
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
+ case kArchNop:
+ case kArchThrowTerminator:
+      // Don't emit code for nops.
+ break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ int double_register_param_count = 0;
+ int x87_layout = 0;
+ for (size_t i = 0; i < instr->InputCount(); i++) {
+ if (instr->InputAt(i)->IsDoubleRegister()) {
+ double_register_param_count++;
+ }
+ }
+      // Currently we use only one X87 register. If double_register_param_count
+      // is greater than 1, a duplicated double register was added to the
+      // inputs of this instruction.
+ if (double_register_param_count > 0) {
+ x87_layout = (0 << 3) | 1;
+ }
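+      // The layout word packs the x87 register code in the upper bits and
+      // the FPU stack depth in the low three bits; with one live value on
+      // the x87 stack this is (0 << 3) | 1.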
+      // The x87 stack layout is loaded on top of the FPU register stack for
+      // the deoptimizer.
+ __ push(Immediate(x87_layout));
+ __ fild_s(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kPointerSize));
+
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ break;
+ }
+ case kArchRet:
+ AssembleReturn();
+ break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), ebp);
+ break;
+ case kArchStackPointer:
+ __ mov(i.OutputRegister(), esp);
+ break;
+ case kArchTruncateDoubleToI: {
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fld_d(i.InputOperand(0));
+ }
+ __ TruncateX87TOSToI(i.OutputRegister());
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fstp(0);
+ }
+ break;
+ }
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ Register value = i.InputRegister(index);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
+ scratch0, scratch1, mode);
+ __ mov(operand, value);
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ not_zero, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kX87Add:
+ if (HasImmediateInput(instr, 1)) {
+ __ add(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ add(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87And:
+ if (HasImmediateInput(instr, 1)) {
+ __ and_(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ and_(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87Cmp:
+ if (HasImmediateInput(instr, 1)) {
+ __ cmp(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ cmp(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87Test:
+ if (HasImmediateInput(instr, 1)) {
+ __ test(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ test(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87Imul:
+ if (HasImmediateInput(instr, 1)) {
+ __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
+ } else {
+ __ imul(i.OutputRegister(), i.InputOperand(1));
+ }
+ break;
+ case kX87ImulHigh:
+ __ imul(i.InputRegister(1));
+ break;
+ case kX87UmulHigh:
+ __ mul(i.InputRegister(1));
+ break;
+ case kX87Idiv:
+ __ cdq();
+ __ idiv(i.InputOperand(1));
+ break;
+ case kX87Udiv:
+ __ Move(edx, Immediate(0));
+ __ div(i.InputOperand(1));
+ break;
+ case kX87Not:
+ __ not_(i.OutputOperand());
+ break;
+ case kX87Neg:
+ __ neg(i.OutputOperand());
+ break;
+ case kX87Or:
+ if (HasImmediateInput(instr, 1)) {
+ __ or_(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ or_(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87Xor:
+ if (HasImmediateInput(instr, 1)) {
+ __ xor_(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ xor_(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87Sub:
+ if (HasImmediateInput(instr, 1)) {
+ __ sub(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ sub(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kX87Shl:
+ if (HasImmediateInput(instr, 1)) {
+ __ shl(i.OutputOperand(), i.InputInt5(1));
+ } else {
+ __ shl_cl(i.OutputOperand());
+ }
+ break;
+ case kX87Shr:
+ if (HasImmediateInput(instr, 1)) {
+ __ shr(i.OutputOperand(), i.InputInt5(1));
+ } else {
+ __ shr_cl(i.OutputOperand());
+ }
+ break;
+ case kX87Sar:
+ if (HasImmediateInput(instr, 1)) {
+ __ sar(i.OutputOperand(), i.InputInt5(1));
+ } else {
+ __ sar_cl(i.OutputOperand());
+ }
+ break;
+ case kX87Ror:
+ if (HasImmediateInput(instr, 1)) {
+ __ ror(i.OutputOperand(), i.InputInt5(1));
+ } else {
+ __ ror_cl(i.OutputOperand());
+ }
+ break;
+ case kX87Lzcnt:
+ __ Lzcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kX87Popcnt:
+ __ Popcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kX87LoadFloat64Constant: {
+ InstructionOperand* source = instr->InputAt(0);
+ InstructionOperand* destination = instr->Output();
+ DCHECK(source->IsConstant());
+ X87OperandConverter g(this, nullptr);
+ Constant src_constant = g.ToConstant(source);
+
+ DCHECK_EQ(Constant::kFloat64, src_constant.type());
+ uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+ uint32_t lower = static_cast<uint32_t>(src);
+ uint32_t upper = static_cast<uint32_t>(src >> 32);
+ if (destination->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ mov(MemOperand(esp, 0), Immediate(lower));
+ __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case kX87Float32Cmp: {
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ FCmp();
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ break;
+ }
+ case kX87Float32Add: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
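+      // 0x027F selects 53-bit (double) precision in the FPU control word so
+      // the arithmetic below rounds like IEEE doubles rather than 80-bit
+      // extended values; 0x037F at the end restores the default precision.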
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ faddp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+      // Restore the default value of the control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float32Sub: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fsubp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+      // Restore the default value of the control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float32Mul: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fmulp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+      // Restore the default value of the control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float32Div: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fdivp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+      // Restore the default value of the control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float32Max: {
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = below;
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+
+      // If at least one of the two operands is NaN, return the right operand.
+ __ j(parity_even, &return_right, Label::kNear);
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+
+ __ fadd(1);
+ __ jmp(&return_left, Label::kNear);
+
+ __ bind(&return_right);
+ __ fxch();
+
+ __ bind(&return_left);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ break;
+ }
+ case kX87Float32Min: {
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = above;
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+      // If at least one of the two operands is NaN, return the right operand.
+ __ j(parity_even, &return_right, Label::kNear);
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+      // At this point, both left and right are either +0 or -0. Spill st0
+      // and st1 to the stack, combine their sign bits through eax, and
+      // reload the combined value as the result.
+ __ push(eax);
+ __ fld(1);
+ __ fld(1);
+ __ sub(esp, Immediate(2 * kPointerSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fstp_s(MemOperand(esp, kPointerSize));
+ __ pop(eax);
+ __ xor_(MemOperand(esp, 0), eax);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+      __ pop(eax);  // Pop the remaining spill slot.
+      __ pop(eax);  // Restore the saved eax.
+ __ jmp(&return_left, Label::kNear);
+
+ __ bind(&return_right);
+ __ fxch();
+
+ __ bind(&return_left);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ break;
+ }
+ case kX87Float32Sqrt: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ fsqrt();
+ __ lea(esp, Operand(esp, kFloatSize));
+ break;
+ }
+ case kX87Float32Abs: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ fabs();
+ __ lea(esp, Operand(esp, kFloatSize));
+ break;
+ }
+ case kX87Float32Round: {
+ RoundingMode mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+      // Set the correct rounding mode in the x87 control register.
+ __ X87SetRC((mode << 10));
+
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ InstructionOperand* input = instr->InputAt(0);
+ USE(input);
+ DCHECK(input->IsDoubleStackSlot());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_s(i.InputOperand(0));
+ }
+ __ frndint();
+ __ X87SetRC(0x0000);
+ break;
+ }
+ case kX87Float64Add: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ faddp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+      // Restore the default value of the control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float64Sub: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fsub_d(MemOperand(esp, 0));
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+      // Restore the default value of the control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float64Mul: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fmul_d(MemOperand(esp, 0));
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+      // Restore the default value of the control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float64Div: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fdiv_d(MemOperand(esp, 0));
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+      // Restore the default value of the control word.
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float64Mod: {
+ FrameScope frame_scope(&masm_, StackFrame::MANUAL);
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ mov(eax, esp);
+ __ PrepareCallCFunction(4, eax);
+ __ fstp(0);
+ __ fld_d(MemOperand(eax, 0));
+ __ fstp_d(Operand(esp, 1 * kDoubleSize));
+ __ fld_d(MemOperand(eax, kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 4);
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ break;
+ }
+ case kX87Float64Max: {
+ Label check_zero, return_left, return_right;
+ Condition condition = below;
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fld_d(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+ __ j(parity_even, &return_right,
+           Label::kNear);  // At least one NaN: return the right operand.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+
+ __ bind(&return_right);
+ __ fxch();
+
+ __ bind(&return_left);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ break;
+ }
+ case kX87Float64Min: {
+ Label check_zero, return_left, return_right;
+ Condition condition = above;
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fld_d(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+ __ j(parity_even, &return_right,
+           Label::kNear);  // At least one NaN: return the right operand.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+
+ __ bind(&return_right);
+ __ fxch();
+
+ __ bind(&return_left);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ break;
+ }
+ case kX87Float64Abs: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ fabs();
+ __ lea(esp, Operand(esp, kDoubleSize));
+ break;
+ }
+ case kX87Int32ToFloat64: {
+ InstructionOperand* input = instr->InputAt(0);
+ DCHECK(input->IsRegister() || input->IsStackSlot());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ if (input->IsRegister()) {
+ Register input_reg = i.InputRegister(0);
+ __ push(input_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(input_reg);
+ } else {
+ __ fild_s(i.InputOperand(0));
+ }
+ break;
+ }
+ case kX87Float32ToFloat64: {
+ InstructionOperand* input = instr->InputAt(0);
+ if (input->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(MemOperand(esp, 0));
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ DCHECK(input->IsDoubleStackSlot());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_s(i.InputOperand(0));
+ }
+ break;
+ }
+ case kX87Uint32ToFloat64: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ LoadUint32NoSSE2(i.InputRegister(0));
+ break;
+ }
+ case kX87Float64ToInt32: {
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fld_d(i.InputOperand(0));
+ }
+ __ TruncateX87TOSToI(i.OutputRegister(0));
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fstp(0);
+ }
+ break;
+ }
+ case kX87Float64ToFloat32: {
+ InstructionOperand* input = instr->InputAt(0);
+ if (input->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fld_s(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ DCHECK(input->IsDoubleStackSlot());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_d(i.InputOperand(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fld_s(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ }
+ break;
+ }
+ case kX87Float64ToUint32: {
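+      // Values above INT32_MAX do not survive a signed x87 truncation, so
+      // bias the input by -2^31 (pushed below and added via fadd), truncate,
+      // and undo the bias by adding 0x80000000 to the integer result.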
+ __ push_imm32(-2147483648);
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fld_d(i.InputOperand(0));
+ }
+ __ fild_s(Operand(esp, 0));
+ __ fadd(1);
+ __ fstp(0);
+ __ TruncateX87TOSToI(i.OutputRegister(0));
+ __ add(esp, Immediate(kInt32Size));
+ __ add(i.OutputRegister(), Immediate(0x80000000));
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fstp(0);
+ }
+ break;
+ }
+ case kX87Float64ExtractHighWord32: {
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(MemOperand(esp, 0));
+ __ mov(i.OutputRegister(), MemOperand(esp, kDoubleSize / 2));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ InstructionOperand* input = instr->InputAt(0);
+ USE(input);
+ DCHECK(input->IsDoubleStackSlot());
+ __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
+ }
+ break;
+ }
+ case kX87Float64ExtractLowWord32: {
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(MemOperand(esp, 0));
+ __ mov(i.OutputRegister(), MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ InstructionOperand* input = instr->InputAt(0);
+ USE(input);
+ DCHECK(input->IsDoubleStackSlot());
+ __ mov(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ }
+ case kX87Float64InsertHighWord32: {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(MemOperand(esp, 0));
+ __ mov(MemOperand(esp, kDoubleSize / 2), i.InputRegister(1));
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ break;
+ }
+ case kX87Float64InsertLowWord32: {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(MemOperand(esp, 0));
+ __ mov(MemOperand(esp, 0), i.InputRegister(1));
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ break;
+ }
+ case kX87Float64Sqrt: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ fsqrt();
+ __ lea(esp, Operand(esp, kDoubleSize));
+ __ X87SetFPUCW(0x037F);
+ break;
+ }
+ case kX87Float64Round: {
+ RoundingMode mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+      // Set the correct rounding mode in the x87 control register.
+ __ X87SetRC((mode << 10));
+
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ InstructionOperand* input = instr->InputAt(0);
+ USE(input);
+ DCHECK(input->IsDoubleStackSlot());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_d(i.InputOperand(0));
+ }
+ __ frndint();
+ __ X87SetRC(0x0000);
+ break;
+ }
+ case kX87Float64Cmp: {
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fld_d(MemOperand(esp, 0));
+ __ FCmp();
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ break;
+ }
+ case kX87Movsxbl:
+ __ movsx_b(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX87Movzxbl:
+ __ movzx_b(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX87Movb: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ mov_b(operand, i.InputInt8(index));
+ } else {
+ __ mov_b(operand, i.InputRegister(index));
+ }
+ break;
+ }
+ case kX87Movsxwl:
+ __ movsx_w(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX87Movzxwl:
+ __ movzx_w(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kX87Movw: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ mov_w(operand, i.InputInt16(index));
+ } else {
+ __ mov_w(operand, i.InputRegister(index));
+ }
+ break;
+ }
+ case kX87Movl:
+ if (instr->HasOutput()) {
+ __ mov(i.OutputRegister(), i.MemoryOperand());
+ } else {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ mov(operand, i.InputImmediate(index));
+ } else {
+ __ mov(operand, i.InputRegister(index));
+ }
+ }
+ break;
+ case kX87Movsd: {
+ if (instr->HasOutput()) {
+ X87Register output = i.OutputDoubleRegister();
+ USE(output);
+ DCHECK(output.code() == 0);
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_d(i.MemoryOperand());
+ } else {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ fst_d(operand);
+ }
+ break;
+ }
+ case kX87Movss: {
+ if (instr->HasOutput()) {
+ X87Register output = i.OutputDoubleRegister();
+ USE(output);
+ DCHECK(output.code() == 0);
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_s(i.MemoryOperand());
+ } else {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ fst_s(operand);
+ }
+ break;
+ }
+ case kX87BitcastFI: {
+ __ mov(i.OutputRegister(), MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kFloatSize));
+ break;
+ }
+ case kX87BitcastIF: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ if (instr->InputAt(0)->IsRegister()) {
+ __ lea(esp, Operand(esp, -kFloatSize));
+ __ mov(MemOperand(esp, 0), i.InputRegister(0));
+ __ fld_s(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kFloatSize));
+ } else {
+ __ fld_s(i.InputOperand(0));
+ }
+ break;
+ }
+ case kX87Lea: {
+ AddressingMode mode = AddressingModeField::decode(instr->opcode());
+ // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
+ // and addressing mode just happens to work out. The "addl"/"subl" forms
+ // in these cases are faster based on measurements.
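+      // For example, when the output register aliases input 0, the
+      // kMode_MRI form "lea r, [r + imm]" becomes "add r, imm" (or
+      // "sub r, -imm"), and the kMode_MR1 form "lea r, [r + r]" becomes
+      // "shl r, 1".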
+ if (mode == kMode_MI) {
+ __ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
+ } else if (i.InputRegister(0).is(i.OutputRegister())) {
+ if (mode == kMode_MRI) {
+ int32_t constant_summand = i.InputInt32(1);
+ if (constant_summand > 0) {
+ __ add(i.OutputRegister(), Immediate(constant_summand));
+ } else if (constant_summand < 0) {
+ __ sub(i.OutputRegister(), Immediate(-constant_summand));
+ }
+ } else if (mode == kMode_MR1) {
+ if (i.InputRegister(1).is(i.OutputRegister())) {
+ __ shl(i.OutputRegister(), 1);
+ } else {
+ __ lea(i.OutputRegister(), i.MemoryOperand());
+ }
+ } else if (mode == kMode_M2) {
+ __ shl(i.OutputRegister(), 1);
+ } else if (mode == kMode_M4) {
+ __ shl(i.OutputRegister(), 2);
+ } else if (mode == kMode_M8) {
+ __ shl(i.OutputRegister(), 3);
+ } else {
+ __ lea(i.OutputRegister(), i.MemoryOperand());
+ }
+ } else {
+ __ lea(i.OutputRegister(), i.MemoryOperand());
+ }
+ break;
+ }
+ case kX87Push:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
+ if (allocated.representation() == MachineRepresentation::kFloat32) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_s(Operand(esp, 0));
+ } else {
+ DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(Operand(esp, 0));
+ }
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
+ if (allocated.representation() == MachineRepresentation::kFloat32) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fld_s(i.InputOperand(0));
+ __ fstp_s(MemOperand(esp, 0));
+ } else {
+ DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fld_d(i.InputOperand(0));
+ __ fstp_d(MemOperand(esp, 0));
+ }
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else if (HasImmediateInput(instr, 0)) {
+ __ push(i.InputImmediate(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ } else {
+ __ push(i.InputOperand(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
+ break;
+ case kX87Poke: {
+ int const slot = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
+ } else {
+ __ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
+ }
+ break;
+ }
+ case kX87PushFloat32:
+ __ lea(esp, Operand(esp, -kFloatSize));
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ fld_s(i.InputOperand(0));
+ __ fstp_s(MemOperand(esp, 0));
+ } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ fst_s(MemOperand(esp, 0));
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case kX87PushFloat64:
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ fld_d(i.InputOperand(0));
+ __ fstp_d(MemOperand(esp, 0));
+ } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ fst_d(MemOperand(esp, 0));
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
+ break;
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
+ break;
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
+ break;
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
+ break;
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
+ break;
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s);
+ break;
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d);
+ break;
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
+ break;
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
+ break;
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(mov);
+ break;
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT(fst_s);
+ break;
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_FLOAT(fst_d);
+ break;
+ case kX87StackCheck: {
+ ExternalReference const stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ break;
+ }
+ case kCheckedLoadWord64:
+ case kCheckedStoreWord64:
+      UNREACHABLE();  // Checked int64 loads/stores are currently unsupported.
+ break;
+ }
+} // NOLINT(readability/fn_size)
+
+
+// Assembles a branch after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ X87OperandConverter i(this, instr);
+ Label::Distance flabel_distance =
+ branch->fallthru ? Label::kNear : Label::kFar;
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ switch (branch->condition) {
+ case kUnorderedEqual:
+ __ j(parity_even, flabel, flabel_distance);
+ // Fall through.
+ case kEqual:
+ __ j(equal, tlabel);
+ break;
+ case kUnorderedNotEqual:
+ __ j(parity_even, tlabel);
+ // Fall through.
+ case kNotEqual:
+ __ j(not_equal, tlabel);
+ break;
+ case kSignedLessThan:
+ __ j(less, tlabel);
+ break;
+ case kSignedGreaterThanOrEqual:
+ __ j(greater_equal, tlabel);
+ break;
+ case kSignedLessThanOrEqual:
+ __ j(less_equal, tlabel);
+ break;
+ case kSignedGreaterThan:
+ __ j(greater, tlabel);
+ break;
+ case kUnsignedLessThan:
+ __ j(below, tlabel);
+ break;
+ case kUnsignedGreaterThanOrEqual:
+ __ j(above_equal, tlabel);
+ break;
+ case kUnsignedLessThanOrEqual:
+ __ j(below_equal, tlabel);
+ break;
+ case kUnsignedGreaterThan:
+ __ j(above, tlabel);
+ break;
+ case kOverflow:
+ __ j(overflow, tlabel);
+ break;
+ case kNotOverflow:
+ __ j(no_overflow, tlabel);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ // Add a jump if not falling through to the next block.
+ if (!branch->fallthru) __ jmp(flabel);
+}
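+
+// For illustration only: a sketch of what the kUnorderedEqual case above
+// emits when the false block is the fall-through successor. The x87 compare
+// sets the parity flag when either operand is NaN, so an unordered result
+// branches straight to the false label:
+//
+//   jp  flabel    ; NaN operand => unordered => false
+//   je  tlabel
+//   ; fall through to flabel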
+
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ X87OperandConverter i(this, instr);
+ Label done;
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ Label check;
+ DCHECK_NE(0u, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = no_condition;
+ switch (condition) {
+ case kUnorderedEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ Move(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kEqual:
+ cc = equal;
+ break;
+ case kUnorderedNotEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kNotEqual:
+ cc = not_equal;
+ break;
+ case kSignedLessThan:
+ cc = less;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = greater_equal;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = less_equal;
+ break;
+ case kSignedGreaterThan:
+ cc = greater;
+ break;
+ case kUnsignedLessThan:
+ cc = below;
+ break;
+ case kUnsignedGreaterThanOrEqual:
+ cc = above_equal;
+ break;
+ case kUnsignedLessThanOrEqual:
+ cc = below_equal;
+ break;
+ case kUnsignedGreaterThan:
+ cc = above;
+ break;
+ case kOverflow:
+ cc = overflow;
+ break;
+ case kNotOverflow:
+ cc = no_overflow;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ bind(&check);
+ if (reg.is_byte_register()) {
+ // setcc for byte registers (al, bl, cl, dl).
+ __ setcc(cc, reg);
+ __ movzx_b(reg, reg);
+ } else {
+ // Emit a branch to set a register to either 1 or 0.
+ Label set;
+ __ j(cc, &set, Label::kNear);
+ __ Move(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&set);
+ __ mov(reg, Immediate(1));
+ }
+ __ bind(&done);
+}
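+
+// For illustration only: when the result register is not byte-addressable,
+// the boolean materialization above emits a branch instead of setcc; a
+// sketch of the sequence (cc is the condition selected in the switch):
+//
+//   jcc set
+//   mov reg, 0
+//   jmp done
+// set:
+//   mov reg, 1
+// done: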
+
+
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ X87OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ cmp(input, Immediate(i.InputInt32(index + 0)));
+ __ j(equal, GetLabel(i.InputRpo(index + 1)));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ X87OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+ Label** cases = zone()->NewArray<Label*>(case_count);
+ for (size_t index = 0; index < case_count; ++index) {
+ cases[index] = GetLabel(i.InputRpo(index + 2));
+ }
+ Label* const table = AddJumpTable(cases, case_count);
+ __ cmp(input, Immediate(case_count));
+ __ j(above_equal, GetLabel(i.InputRpo(1)));
+ __ jmp(Operand::JumpTable(input, times_4, table));
+}
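+
+// For illustration only: a sketch of the dispatch sequence emitted above,
+// where the jump table holds 32-bit label addresses written out by
+// AssembleJumpTable further below:
+//
+//   cmp input, case_count
+//   jae default_label
+//   jmp [table + input*4]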
+
+
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, bailout_type);
+ __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+// The calling convention for JSFunctions on X87 passes arguments on the
+// stack and the JSFunction and context in EDI and ESI, respectively, so the
+// steps of a call look as follows:
+
+// --{ before the call instruction }--------------------------------------------
+// | caller frame |
+// ^ esp ^ ebp
+
+// --{ push arguments and setup ESI, EDI }--------------------------------------
+// | args + receiver | caller frame |
+// ^ esp ^ ebp
+// [edi = JSFunction, esi = context]
+
+// --{ call [edi + kCodeEntryOffset] }------------------------------------------
+// | RET | args + receiver | caller frame |
+// ^ esp ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+// | FP | RET | args + receiver | caller frame |
+// ^ esp ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+// | FP | RET | args + receiver | caller frame |
+// ^ ebp,esp
+
+// --{ push esi }---------------------------------------------------------------
+// | CTX | FP | RET | args + receiver | caller frame |
+// ^esp ^ ebp
+
+// --{ push edi }---------------------------------------------------------------
+// | FNC | CTX | FP | RET | args + receiver | caller frame |
+// ^esp ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
+// ^esp ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ mov esp, ebp }-----------------------------------------------------------
+// | FP | RET | args + receiver | caller frame |
+// ^ esp,ebp
+
+// --{ pop ebp }----------------------------------------------------------------
+// | | RET | args + receiver | caller frame |
+// ^ esp ^ ebp
+
+// --{ ret #A+1 }---------------------------------------------------------------
+// | | caller frame |
+// ^ esp ^ ebp
+
+
+// Runtime function calls are accomplished by doing a stub call to the
+// CEntryStub (a real code object). On X87 the stub call passes arguments on
+// the stack, the number of arguments in EAX, the address of the runtime
+// function in EBX, and the context in ESI.
+
+// --{ before the call instruction }--------------------------------------------
+// | caller frame |
+// ^ esp ^ ebp
+
+// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
+// | args + receiver | caller frame |
+// ^ esp ^ ebp
+// [eax = #args, ebx = runtime function, esi = context]
+
+// --{ call #CEntryStub }-------------------------------------------------------
+// | RET | args + receiver | caller frame |
+// ^ esp ^ ebp
+
+// =={ body of runtime function }===============================================
+
+// --{ runtime returns }--------------------------------------------------------
+// | caller frame |
+// ^ esp ^ ebp
+
+// Other custom linkages (e.g. for calling directly into and out of C++) may
+// need to save callee-saved registers on the stack, which is done in the
+// function prologue of generated code.
+
+// --{ before the call instruction }--------------------------------------------
+// | caller frame |
+// ^ esp ^ ebp
+
+// --{ set up arguments in registers on stack }---------------------------------
+// | args | caller frame |
+// ^ esp ^ ebp
+// [r0 = arg0, r1 = arg1, ...]
+
+// --{ call code }--------------------------------------------------------------
+// | RET | args | caller frame |
+// ^ esp ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+// | FP | RET | args | caller frame |
+// ^ esp ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+// | FP | RET | args | caller frame |
+// ^ ebp,esp
+
+// --{ save registers }---------------------------------------------------------
+// | regs | FP | RET | args | caller frame |
+// ^ esp ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+// | callee frame | regs | FP | RET | args | caller frame |
+// ^esp ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ restore registers }------------------------------------------------------
+// | regs | FP | RET | args | caller frame |
+// ^ esp ^ ebp
+
+// --{ mov esp, ebp }-----------------------------------------------------------
+// | FP | RET | args | caller frame |
+// ^ esp,ebp
+
+// --{ pop ebp }----------------------------------------------------------------
+// | RET | args | caller frame |
+// ^ esp ^ ebp
+
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->IsCFunctionCall()) {
+    // Assemble a prologue similar to the cdecl calling convention.
+ __ push(ebp);
+ __ mov(ebp, esp);
+ } else if (descriptor->IsJSFunctionCall()) {
+ // TODO(turbofan): this prologue is redundant with OSR, but needed for
+ // code aging.
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
+ __ StubPrologue();
+ } else {
+ frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (stack_shrink_slots > 0) {
+ __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
+ }
+
+ if (saves != 0) { // Save callee-saved registers.
+ DCHECK(!info()->is_osr());
+ int pushed = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ __ push(Register::from_code(i));
+ ++pushed;
+ }
+ frame()->AllocateSavedCalleeRegisterSlots(pushed);
+ }
+
+  // Initialize FPU state.
+ __ fninit();
+ __ fld1();
+}
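+
+// For illustration only: a sketch of the frame setup emitted above for a
+// CFunctionCall descriptor, assuming no callee-saved registers and N spill
+// slots:
+//
+//   push ebp
+//   mov ebp, esp
+//   sub esp, N * kPointerSize
+//   fninit          ; reset the x87 FPU
+//   fld1            ; establish the one-value x87 stack invariant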
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+
+  // Clear the x87 FPU stack only if there is no floating-point return value
+  // left on it.
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ bool clear_stack = true;
+  for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
+ MachineRepresentation rep = descriptor->GetReturnType(i).representation();
+ LinkageLocation loc = descriptor->GetReturnLocation(i);
+ if (IsFloatingPoint(rep) && loc == LinkageLocation::ForRegister(0)) {
+ clear_stack = false;
+ break;
+ }
+ }
+ if (clear_stack) __ fstp(0);
+
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ // Restore registers.
+ if (saves != 0) {
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ if (!((1 << i) & saves)) continue;
+ __ pop(Register::from_code(i));
+ }
+ }
+
+ if (descriptor->IsCFunctionCall()) {
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer.
+ __ pop(ebp); // Pop caller's frame pointer.
+ } else if (frame()->needs_frame()) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer.
+ __ pop(ebp); // Pop caller's frame pointer.
+ }
+ }
+ if (pop_count == 0) {
+ __ ret(0);
+ } else {
+ __ Ret(pop_count * kPointerSize, ebx);
+ }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ X87OperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ Operand dst = g.ToOperand(destination);
+ __ mov(dst, src);
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Operand src = g.ToOperand(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mov(dst, src);
+ } else {
+ Operand dst = g.ToOperand(destination);
+ __ push(src);
+ __ pop(dst);
+ }
+ } else if (source->IsConstant()) {
+ Constant src_constant = g.ToConstant(source);
+ if (src_constant.type() == Constant::kHeapObject) {
+ Handle<HeapObject> src = src_constant.ToHeapObject();
+ int offset;
+ if (IsMaterializableFromFrame(src, &offset)) {
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mov(dst, g.ToMaterializableOperand(offset));
+ } else {
+ DCHECK(destination->IsStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ push(g.ToMaterializableOperand(offset));
+ __ pop(dst);
+ }
+ } else if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ LoadHeapObject(dst, src);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ Operand dst = g.ToOperand(destination);
+ AllowDeferredHandleDereference embedding_raw_address;
+ if (isolate()->heap()->InNewSpace(*src)) {
+ __ PushHeapObject(src);
+ __ pop(dst);
+ } else {
+ __ mov(dst, src);
+ }
+ }
+ } else if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Move(dst, g.ToImmediate(source));
+ } else if (destination->IsStackSlot()) {
+ Operand dst = g.ToOperand(destination);
+ __ Move(dst, g.ToImmediate(source));
+ } else if (src_constant.type() == Constant::kFloat32) {
+ // TODO(turbofan): Can we do better here?
+ uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
+ if (destination->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kInt32Size));
+ __ mov(MemOperand(esp, 0), Immediate(src));
+        // Keep at most one value on the x87 stack: pop the current top first.
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ add(esp, Immediate(kInt32Size));
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ Move(dst, Immediate(src));
+ }
+ } else {
+ DCHECK_EQ(Constant::kFloat64, src_constant.type());
+ uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+ uint32_t lower = static_cast<uint32_t>(src);
+ uint32_t upper = static_cast<uint32_t>(src >> 32);
+ if (destination->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ mov(MemOperand(esp, 0), Immediate(lower));
+ __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
+        // Keep at most one value on the x87 stack: pop the current top first.
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst0 = g.ToOperand(destination);
+ Operand dst1 = g.HighOperand(destination);
+ __ Move(dst0, Immediate(lower));
+ __ Move(dst1, Immediate(upper));
+ }
+ }
+ } else if (source->IsDoubleRegister()) {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ auto allocated = AllocatedOperand::cast(*source);
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
+ __ fst_s(dst);
+ break;
+ case MachineRepresentation::kFloat64:
+ __ fst_d(dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ Operand src = g.ToOperand(source);
+ auto allocated = AllocatedOperand::cast(*source);
+ if (destination->IsDoubleRegister()) {
+      // Keep at most one value on the x87 stack: pop the current top first.
+ __ fstp(0);
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
+ __ fld_s(src);
+ break;
+ case MachineRepresentation::kFloat64:
+ __ fld_d(src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ Operand dst = g.ToOperand(destination);
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
+ __ fld_s(src);
+ __ fstp_s(dst);
+ break;
+ case MachineRepresentation::kFloat64:
+ __ fld_d(src);
+ __ fstp_d(dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
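+
+// For illustration only: ia32 has no memory-to-memory mov, so the stack
+// slot-to-stack slot case above bounces the value through the machine stack;
+// a sketch:
+//
+//   push [src]
+//   pop  [dst]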
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ X87OperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister() && destination->IsRegister()) {
+ // Register-register.
+ Register src = g.ToRegister(source);
+ Register dst = g.ToRegister(destination);
+ __ xchg(dst, src);
+ } else if (source->IsRegister() && destination->IsStackSlot()) {
+ // Register-memory.
+ __ xchg(g.ToRegister(source), g.ToOperand(destination));
+ } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+ // Memory-memory.
+ Operand dst1 = g.ToOperand(destination);
+ __ push(dst1);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand src1 = g.ToOperand(source);
+ __ push(src1);
+ Operand dst2 = g.ToOperand(destination);
+ __ pop(dst2);
+ frame_access_state()->IncreaseSPDelta(-1);
+ Operand src2 = g.ToOperand(source);
+ __ pop(src2);
+ } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ UNREACHABLE();
+ } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+ auto allocated = AllocatedOperand::cast(*source);
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
+ __ fld_s(g.ToOperand(destination));
+ __ fxch();
+ __ fstp_s(g.ToOperand(destination));
+ break;
+ case MachineRepresentation::kFloat64:
+ __ fld_d(g.ToOperand(destination));
+ __ fxch();
+ __ fstp_d(g.ToOperand(destination));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+ auto allocated = AllocatedOperand::cast(*source);
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
+ __ fld_s(g.ToOperand(source));
+ __ fld_s(g.ToOperand(destination));
+ __ fstp_s(g.ToOperand(source));
+ __ fstp_s(g.ToOperand(destination));
+ break;
+ case MachineRepresentation::kFloat64:
+ __ fld_d(g.ToOperand(source));
+ __ fld_d(g.ToOperand(destination));
+ __ fstp_d(g.ToOperand(source));
+ __ fstp_d(g.ToOperand(destination));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
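+
+// For illustration only: the stack slot-to-stack slot swap above likewise
+// uses the machine stack as scratch space; a sketch:
+//
+//   push [dst]    ; save dst
+//   push [src]
+//   pop  [dst]    ; dst = old src
+//   pop  [src]    ; src = old dst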
+
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ for (size_t index = 0; index < target_count; ++index) {
+ __ dd(targets[index]);
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
+ int space_needed = Deoptimizer::patch_size();
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/x87/instruction-codes-x87.h b/src/compiler/x87/instruction-codes-x87.h
new file mode 100644
index 0000000..b498d9c
--- /dev/null
+++ b/src/compiler/x87/instruction-codes-x87.h
@@ -0,0 +1,125 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
+#define V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
+
+#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-codes.h"
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// X87-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(X87Add) \
+ V(X87And) \
+ V(X87Cmp) \
+ V(X87Test) \
+ V(X87Or) \
+ V(X87Xor) \
+ V(X87Sub) \
+ V(X87Imul) \
+ V(X87ImulHigh) \
+ V(X87UmulHigh) \
+ V(X87Idiv) \
+ V(X87Udiv) \
+ V(X87Not) \
+ V(X87Neg) \
+ V(X87Shl) \
+ V(X87Shr) \
+ V(X87Sar) \
+ V(X87Ror) \
+ V(X87Lzcnt) \
+ V(X87Popcnt) \
+ V(X87Float32Cmp) \
+ V(X87Float32Add) \
+ V(X87Float32Sub) \
+ V(X87Float32Mul) \
+ V(X87Float32Div) \
+ V(X87Float32Max) \
+ V(X87Float32Min) \
+ V(X87Float32Abs) \
+ V(X87Float32Sqrt) \
+ V(X87Float32Round) \
+ V(X87LoadFloat64Constant) \
+ V(X87Float64Add) \
+ V(X87Float64Sub) \
+ V(X87Float64Mul) \
+ V(X87Float64Div) \
+ V(X87Float64Mod) \
+ V(X87Float64Max) \
+ V(X87Float64Min) \
+ V(X87Float64Abs) \
+ V(X87Int32ToFloat64) \
+ V(X87Float32ToFloat64) \
+ V(X87Uint32ToFloat64) \
+ V(X87Float64ToInt32) \
+ V(X87Float64ToFloat32) \
+ V(X87Float64ToUint32) \
+ V(X87Float64ExtractHighWord32) \
+ V(X87Float64ExtractLowWord32) \
+ V(X87Float64InsertHighWord32) \
+ V(X87Float64InsertLowWord32) \
+ V(X87Float64Sqrt) \
+ V(X87Float64Round) \
+ V(X87Float64Cmp) \
+ V(X87Movsxbl) \
+ V(X87Movzxbl) \
+ V(X87Movb) \
+ V(X87Movsxwl) \
+ V(X87Movzxwl) \
+ V(X87Movw) \
+ V(X87Movl) \
+ V(X87Movss) \
+ V(X87Movsd) \
+ V(X87Lea) \
+ V(X87BitcastFI) \
+ V(X87BitcastIF) \
+ V(X87Push) \
+ V(X87PushFloat64) \
+ V(X87PushFloat32) \
+ V(X87Poke) \
+ V(X87StackCheck)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// M = memory operand
+// R = base register
+// N = index register scaled by N, for N in {1, 2, 4, 8}
+// I = immediate displacement (int32_t)
+
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MR) /* [%r1 ] */ \
+ V(MRI) /* [%r1 + K] */ \
+ V(MR1) /* [%r1 + %r2*1 ] */ \
+ V(MR2) /* [%r1 + %r2*2 ] */ \
+ V(MR4) /* [%r1 + %r2*4 ] */ \
+ V(MR8) /* [%r1 + %r2*8 ] */ \
+ V(MR1I) /* [%r1 + %r2*1 + K] */ \
+ V(MR2I) /* [%r1 + %r2*2 + K] */ \
+  V(MR4I) /* [%r1 + %r2*4 + K] */ \
+  V(MR8I) /* [%r1 + %r2*8 + K] */ \
+ V(M1) /* [ %r2*1 ] */ \
+ V(M2) /* [ %r2*2 ] */ \
+ V(M4) /* [ %r2*4 ] */ \
+ V(M8) /* [ %r2*8 ] */ \
+ V(M1I) /* [ %r2*1 + K] */ \
+ V(M2I) /* [ %r2*2 + K] */ \
+ V(M4I) /* [ %r2*4 + K] */ \
+ V(M8I) /* [ %r2*8 + K] */ \
+ V(MI) /* [ K] */
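+
+// For illustration only: after register allocation each mode selects how the
+// code generator builds an assembler Operand; a sketch of the mapping (not
+// actual emitter code):
+//
+//   kMode_MR1  -> Operand(base, index, times_1, 0)
+//   kMode_MR4I -> Operand(base, index, times_4, disp)
+//   kMode_MI   -> Operand(disp)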
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
diff --git a/src/compiler/x87/instruction-scheduler-x87.cc b/src/compiler/x87/instruction-scheduler-x87.cc
new file mode 100644
index 0000000..af86a87
--- /dev/null
+++ b/src/compiler/x87/instruction-scheduler-x87.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNIMPLEMENTED();
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNIMPLEMENTED();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
new file mode 100644
index 0000000..cff4aaf
--- /dev/null
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -0,0 +1,1345 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/adapters.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds X87-specific methods for generating operands.
+class X87OperandGenerator final : public OperandGenerator {
+ public:
+ explicit X87OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand UseByteRegister(Node* node) {
+ // TODO(titzer): encode byte register use constraints.
+ return UseFixed(node, edx);
+ }
+
+ InstructionOperand DefineAsByteRegister(Node* node) {
+ // TODO(titzer): encode byte register def constraints.
+ return DefineAsRegister(node);
+ }
+
+ InstructionOperand CreateImmediate(int imm) {
+ return sequence()->AddImmediate(Constant(imm));
+ }
+
+ bool CanBeImmediate(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kExternalConstant:
+ return true;
+ case IrOpcode::kHeapConstant: {
+ // Constants in new space cannot be used as immediates in V8 because
+ // the GC does not scan code objects when collecting the new generation.
+ Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
+ Isolate* isolate = value->GetIsolate();
+ return !isolate->heap()->InNewSpace(*value);
+ }
+ default:
+ return false;
+ }
+ }
+
+ AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
+ Node* displacement_node,
+ InstructionOperand inputs[],
+ size_t* input_count) {
+ AddressingMode mode = kMode_MRI;
+ int32_t displacement = (displacement_node == nullptr)
+ ? 0
+ : OpParameter<int32_t>(displacement_node);
+ if (base != nullptr) {
+ if (base->opcode() == IrOpcode::kInt32Constant) {
+ displacement += OpParameter<int32_t>(base);
+ base = nullptr;
+ }
+ }
+ if (base != nullptr) {
+ inputs[(*input_count)++] = UseRegister(base);
+ if (index != nullptr) {
+ DCHECK(scale >= 0 && scale <= 3);
+ inputs[(*input_count)++] = UseRegister(index);
+ if (displacement != 0) {
+ inputs[(*input_count)++] = TempImmediate(displacement);
+ static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
+ kMode_MR4I, kMode_MR8I};
+ mode = kMRnI_modes[scale];
+ } else {
+ static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
+ kMode_MR4, kMode_MR8};
+ mode = kMRn_modes[scale];
+ }
+ } else {
+ if (displacement == 0) {
+ mode = kMode_MR;
+ } else {
+ inputs[(*input_count)++] = TempImmediate(displacement);
+ mode = kMode_MRI;
+ }
+ }
+ } else {
+ DCHECK(scale >= 0 && scale <= 3);
+ if (index != nullptr) {
+ inputs[(*input_count)++] = UseRegister(index);
+ if (displacement != 0) {
+ inputs[(*input_count)++] = TempImmediate(displacement);
+ static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
+ kMode_M4I, kMode_M8I};
+ mode = kMnI_modes[scale];
+ } else {
+ static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
+ kMode_M4, kMode_M8};
+ mode = kMn_modes[scale];
+ }
+ } else {
+ inputs[(*input_count)++] = TempImmediate(displacement);
+ return kMode_MI;
+ }
+ }
+ return mode;
+ }
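+
+  // For illustration only: given base b, index i, scale 2 (i.e. i*4) and a
+  // non-zero displacement K, the routine above produces the inputs
+  // {UseRegister(b), UseRegister(i), TempImmediate(K)} and selects
+  // kMode_MR4I, i.e. the operand [b + i*4 + K].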
+
+ AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
+ InstructionOperand inputs[],
+ size_t* input_count) {
+ BaseWithIndexAndDisplacement32Matcher m(node, true);
+ DCHECK(m.matches());
+ if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
+ return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
+ m.displacement(), inputs, input_count);
+ } else {
+ inputs[(*input_count)++] = UseRegister(node->InputAt(0));
+ inputs[(*input_count)++] = UseRegister(node->InputAt(1));
+ return kMode_MR1;
+ }
+ }
+
+ bool CanBeBetterLeftOperand(Node* node) const {
+ return !selector()->IsLive(node);
+ }
+};
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kX87Movss;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kX87Movsd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kX87Movsxbl : kX87Movzxbl;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kX87Movsxwl : kX87Movzxwl;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kX87Movl;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ X87OperandGenerator g(this);
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code = opcode | AddressingModeField::encode(mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ X87OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kX87Movss;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kX87Movsd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kX87Movb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kX87Movw;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kX87Movl;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ InstructionOperand val;
+ if (g.CanBeImmediate(value)) {
+ val = g.UseImmediate(value);
+ } else if (rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit) {
+ val = g.UseByteRegister(value);
+ } else {
+ val = g.UseRegister(value);
+ }
+
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code =
+ opcode | AddressingModeField::encode(addressing_mode);
+ inputs[input_count++] = val;
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
+ inputs);
+ }
+}
+
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
+ X87OperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kCheckedLoadWord32;
+ break;
+ case MachineRepresentation::kFloat32:
+ opcode = kCheckedLoadFloat32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kCheckedLoadFloat64;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand offset_operand = g.UseRegister(offset);
+ InstructionOperand length_operand =
+ g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+ if (g.CanBeImmediate(buffer)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), offset_operand, length_operand,
+ offset_operand, g.UseImmediate(buffer));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MR1),
+ g.DefineAsRegister(node), offset_operand, length_operand,
+ g.UseRegister(buffer), offset_operand);
+ }
+}
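+
+// For illustration only: a sketch of what the checked-load opcodes selected
+// above expand to in the code generator; out-of-bounds integer loads yield
+// zero and float loads yield NaN instead of trapping:
+//
+//   cmp offset, length
+//   jae out_of_line     ; produces the default value
+//   mov result, [buffer + offset]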
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
+ X87OperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ Node* const value = node->InputAt(3);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kCheckedStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kCheckedStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kCheckedStoreWord32;
+ break;
+ case MachineRepresentation::kFloat32:
+ opcode = kCheckedStoreFloat32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kCheckedStoreFloat64;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand value_operand =
+ g.CanBeImmediate(value) ? g.UseImmediate(value)
+ : ((rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit)
+ ? g.UseByteRegister(value)
+ : g.UseRegister(value));
+ InstructionOperand offset_operand = g.UseRegister(offset);
+ InstructionOperand length_operand =
+ g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+ if (g.CanBeImmediate(buffer)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ offset_operand, length_operand, value_operand, offset_operand,
+ g.UseImmediate(buffer));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
+ offset_operand, length_operand, value_operand, g.UseRegister(buffer),
+ offset_operand);
+ }
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ X87OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+
+ // TODO(turbofan): match complex addressing modes.
+ if (left == right) {
+ // If both inputs refer to the same operand, enforce allocating a register
+ // for both of them to ensure that we don't end up generating code like
+ // this:
+ //
+ // mov eax, [ebp-0x10]
+ // add eax, [ebp-0x10]
+ // jo label
+ InstructionOperand const input = g.UseRegister(left);
+ inputs[input_count++] = input;
+ inputs[input_count++] = input;
+ } else if (g.CanBeImmediate(right)) {
+ inputs[input_count++] = g.UseRegister(left);
+ inputs[input_count++] = g.UseImmediate(right);
+ } else {
+ if (node->op()->HasProperty(Operator::kCommutative) &&
+ g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
+ inputs[input_count++] = g.UseRegister(left);
+ inputs[input_count++] = g.Use(right);
+ }
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ VisitBinop(this, node, kX87And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kX87Or);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ X87OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kX87Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop(this, node, kX87Xor);
+ }
+}
+
+
+// Shared routine for multiple shift operations.
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ X87OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ if (g.CanBeImmediate(right)) {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseImmediate(right));
+ } else {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseFixed(right, ecx));
+ }
+}
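+
+// For illustration only: ia32 variable-count shifts take the shift count in
+// cl, which is why a non-immediate count is fixed to ecx above; a sketch:
+//
+//   shl eax, cl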
+
+
+namespace {
+
+void VisitMulHigh(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ X87OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsFixed(node, edx),
+ g.UseFixed(node->InputAt(0), eax),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+
+void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+ X87OperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempRegister(edx)};
+ selector->Emit(opcode, g.DefineAsFixed(node, eax),
+ g.UseFixed(node->InputAt(0), eax),
+ g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
+}
+
+
+void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+ X87OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsFixed(node, edx),
+ g.UseFixed(node->InputAt(0), eax),
+ g.UseUnique(node->InputAt(1)));
+}
+
+void EmitLea(InstructionSelector* selector, Node* result, Node* index,
+ int scale, Node* base, Node* displacement) {
+ X87OperandGenerator g(selector);
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode mode = g.GenerateMemoryOperandInputs(
+ index, scale, base, displacement, inputs, &input_count);
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(result);
+
+ InstructionCode opcode = AddressingModeField::encode(mode) | kX87Lea;
+
+ selector->Emit(opcode, 1, outputs, input_count, inputs);
+}
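+
+// For illustration only: EmitLea folds a whole address computation into one
+// instruction; e.g. base b, index i, scale 2, displacement 8 yields
+// (a sketch):
+//
+//   lea result, [b + i*4 + 8]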
+
+} // namespace
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32ScaleMatcher m(node, true);
+ if (m.matches()) {
+ Node* index = node->InputAt(0);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, node, index, m.scale(), base, nullptr);
+ return;
+ }
+ VisitShift(this, node, kX87Shl);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ VisitShift(this, node, kX87Shr);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ VisitShift(this, node, kX87Sar);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitShift(this, node, kX87Ror);
+}
+
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ X87OperandGenerator g(this);
+
+  // Try to match the add to a lea pattern.
+ BaseWithIndexAndDisplacement32Matcher m(node);
+ if (m.matches() &&
+ (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode mode = g.GenerateMemoryOperandInputs(
+ m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+
+ InstructionCode opcode = AddressingModeField::encode(mode) | kX87Lea;
+ Emit(opcode, 1, outputs, input_count, inputs);
+ return;
+ }
+
+  // No lea pattern matched; use an add.
+ VisitBinop(this, node, kX87Add);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ X87OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kX87Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+ } else {
+ VisitBinop(this, node, kX87Sub);
+ }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ Int32ScaleMatcher m(node, true);
+ if (m.matches()) {
+ Node* index = node->InputAt(0);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, node, index, m.scale(), base, nullptr);
+ return;
+ }
+ X87OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (g.CanBeImmediate(right)) {
+ Emit(kX87Imul, g.DefineAsRegister(node), g.Use(left),
+ g.UseImmediate(right));
+ } else {
+ if (g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
+ Emit(kX87Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.Use(right));
+ }
+}
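+
+// For illustration only: the Int32ScaleMatcher above lets multiplications by
+// 3, 5 and 9 avoid imul via power_of_two_plus_one; e.g. x * 5 becomes
+// (a sketch):
+//
+//   lea result, [x + x*4]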
+
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ VisitMulHigh(this, node, kX87ImulHigh);
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ VisitMulHigh(this, node, kX87UmulHigh);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitDiv(this, node, kX87Idiv);
+}
+
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ VisitDiv(this, node, kX87Udiv);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitMod(this, node, kX87Idiv);
+}
+
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ VisitMod(this, node, kX87Udiv);
+}
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32ToFloat64, g.DefineAsFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Int32ToFloat64, g.DefineAsFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Uint32ToFloat64, g.DefineAsFixed(node, stX_0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ToFloat32, g.DefineAsFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ X87OperandGenerator g(this);
+
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+ return;
+ case TruncationMode::kRoundToZero:
+ Emit(kX87Float64ToInt32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+ return;
+ }
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87BitcastFI, g.DefineAsRegister(node), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87BitcastIF, g.DefineAsFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Add, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Add, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Mul, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Mul, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Div, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Div, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ X87OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister(eax)};
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Mod, g.DefineAsFixed(node, stX_0), 1, temps)->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87Float32Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87Float32Sqrt, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87Float64Sqrt, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32Round | MiscField::encode(kRoundDown),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64Round | MiscField::encode(kRoundDown),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32Round | MiscField::encode(kRoundUp), g.UseFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64Round | MiscField::encode(kRoundUp), g.UseFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32Round | MiscField::encode(kRoundToZero),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64Round | MiscField::encode(kRoundToZero),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32Round | MiscField::encode(kRoundToNearest),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64Round | MiscField::encode(kRoundToNearest),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ X87OperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ InstructionOperand temps[] = {g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr, temp_count, temps);
+
+ // Poke any stack arguments.
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
+ int const slot = static_cast<int>(n);
+ InstructionOperand value = g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
+ : g.UseRegister(input.node());
+ Emit(kX87Poke | MiscField::encode(slot), g.NoOutput(), value);
+ }
+ }
+ } else {
+ // Push any stack arguments.
+ for (PushParameter input : base::Reversed(*arguments)) {
+ // TODO(titzer): handle pushing double parameters.
+ if (input.node() == nullptr) continue;
+ InstructionOperand value =
+ g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
+ : IsSupported(ATOM) ||
+ sequence()->IsFloat(GetVirtualRegister(input.node()))
+ ? g.UseRegister(input.node())
+ : g.Use(input.node());
+ Emit(kX87Push, g.NoOutput(), value);
+ }
+ }
+}
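+
+// For illustration only: for a C call with two stack arguments the code
+// above prepares the outgoing frame roughly as (a sketch):
+//
+//   ; kArchPrepareCallCFunction reserves and aligns the argument area
+//   mov [esp + 0*kPointerSize], arg0    ; kX87Poke, slot 0
+//   mov [esp + 1*kPointerSize], arg1    ; kX87Poke, slot 1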
+
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
+
+
+namespace {
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ X87OperandGenerator g(selector);
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
+ left, right);
+ }
+}
+
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ Node* left, Node* right, FlagsContinuation* cont,
+ bool commutative) {
+ X87OperandGenerator g(selector);
+ if (commutative && g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
+ VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+}
+
+
+// Shared routine for multiple float32 compare operations (inputs commuted).
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ X87OperandGenerator g(selector);
+ selector->Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ selector->Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(cont->Encode(kX87Float32Cmp),
+ g.DefineAsByteRegister(cont->result()));
+ }
+}
+
+
+// Shared routine for multiple float64 compare operations (inputs commuted).
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ X87OperandGenerator g(selector);
+ selector->Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ selector->Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(cont->Encode(kX87Float64Cmp),
+ g.DefineAsByteRegister(cont->result()));
+ }
+}
+
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ X87OperandGenerator g(selector);
+ Node* const left = node->InputAt(0);
+ Node* const right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right)) {
+ VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+ } else if (g.CanBeImmediate(left)) {
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, left, right, cont,
+ node->op()->HasProperty(Operator::kCommutative));
+ }
+}
+
+
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ X87OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
+ LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
+ ExternalReference js_stack_limit =
+ ExternalReference::address_of_stack_limit(selector->isolate());
+ if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
+ // Compare(Load(js_stack_limit), LoadStackPointer)
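+      // Presumably kX87StackCheck compares the stack pointer against the
+      // limit, i.e. with the operands commuted relative to the pattern
+      // matched here, hence the Commute() below for non-commutative
+      // conditions.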
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ InstructionCode opcode = cont->Encode(kX87StackCheck);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()));
+ }
+ return;
+ }
+ }
+ VisitWordCompare(selector, node, kX87Cmp, cont);
+}
+
+
+// Shared routine for word comparison with zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+ // Try to combine the branch with a comparison.
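+  // Roughly, CanCover ensures that |value| is only used by |user| within
+  // the same basic block, so folding its flag effects into the branch is
+  // not observable elsewhere.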
+ while (selector->CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal: {
+ // Try to combine with comparisons against 0 by simply inverting the
+ // continuation.
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ continue;
+ }
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWordCompare(selector, value, cont);
+ }
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWordCompare(selector, value, cont);
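+      // Floating-point compares set the CPU flags as for an unsigned
+      // integer compare, and the float compare helpers push their inputs in
+      // commuted order, so e.g. a < b is selected as an unsigned b > a.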
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (ProjectionIndexOf(value->op()) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either nullptr, which means there's no use of
+          // the actual value, or was already defined, which means it is
+          // scheduled *AFTER* this branch.
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || selector->IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kX87Add, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kX87Sub, cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(selector, value, kX87Test, cont);
+ default:
+ break;
+ }
+ break;
+ }
+
+  // Continuation could not be combined with a compare; emit a compare
+  // against zero instead.
+ X87OperandGenerator g(selector);
+ VisitCompare(selector, kX87Cmp, g.Use(value), g.TempImmediate(0), cont);
+}
+
+} // namespace
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+ VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ X87OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
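+  // Rough size/speed estimates for the two dispatch strategies, with time
+  // weighted 3x against space. A table switch is also ruled out when
+  // sw.min_value is INT32_MIN, since negating it for the bias subtraction
+  // below would overflow.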
+ if (sw.case_count > 4 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kX87Lea | AddressingModeField::encode(kMode_MRI), index_operand,
+ value_operand, g.TempImmediate(-sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ }
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
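+  // If the overflow projection is used, bind the flags continuation to it
+  // so the overflow bit is materialized; otherwise emit a plain add. The
+  // subtract variant below follows the same pattern.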
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kX87Add, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX87Add, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kX87Sub, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX87Sub, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedGreaterThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedGreaterThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ExtractLowWord32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ExtractHighWord32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ X87OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
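+  // The x87 backend keeps float64 results on top of the FPU stack, so the
+  // output is fixed to st(0) (stX_0).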
+ Emit(kX87Float64InsertLowWord32, g.UseFixed(node, stX_0), g.UseRegister(left),
+ g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ X87OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kX87Float64InsertHighWord32, g.UseFixed(node, stX_0),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kWord32ShiftIsSafe;
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ flags |= MachineOperatorBuilder::kWord32Popcnt;
+ }
+
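+  // All rounding modes can be implemented on x87 by adjusting the FPU
+  // control word, so the rounding operations are advertised unconditionally.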
+ flags |= MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
+ return flags;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/zone-pool.cc b/src/compiler/zone-pool.cc
index 179988d..2006a79 100644
--- a/src/compiler/zone-pool.cc
+++ b/src/compiler/zone-pool.cc
@@ -65,8 +65,7 @@
}
-ZonePool::ZonePool(Isolate* isolate)
- : isolate_(isolate), max_allocated_bytes_(0), total_deleted_bytes_(0) {}
+ZonePool::ZonePool() : max_allocated_bytes_(0), total_deleted_bytes_(0) {}
ZonePool::~ZonePool() {
@@ -104,10 +103,10 @@
zone = unused_.back();
unused_.pop_back();
} else {
- zone = new Zone(isolate_);
+ zone = new Zone();
}
used_.push_back(zone);
- DCHECK_EQ(0, zone->allocation_size());
+ DCHECK_EQ(0u, zone->allocation_size());
return zone;
}
@@ -130,7 +129,7 @@
delete zone;
} else {
zone->DeleteAll();
- DCHECK_EQ(0, zone->allocation_size());
+ DCHECK_EQ(0u, zone->allocation_size());
unused_.push_back(zone);
}
}
diff --git a/src/compiler/zone-pool.h b/src/compiler/zone-pool.h
index 8b43265..aaf9daa 100644
--- a/src/compiler/zone-pool.h
+++ b/src/compiler/zone-pool.h
@@ -9,26 +9,27 @@
#include <set>
#include <vector>
-#include "src/v8.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
-class ZonePool FINAL {
+class ZonePool final {
public:
- class Scope FINAL {
+ class Scope final {
public:
- explicit Scope(ZonePool* zone_pool) : zone_pool_(zone_pool), zone_(NULL) {}
+ explicit Scope(ZonePool* zone_pool)
+ : zone_pool_(zone_pool), zone_(nullptr) {}
~Scope() { Destroy(); }
Zone* zone() {
- if (zone_ == NULL) zone_ = zone_pool_->NewEmptyZone();
+ if (zone_ == nullptr) zone_ = zone_pool_->NewEmptyZone();
return zone_;
}
void Destroy() {
- if (zone_ != NULL) zone_pool_->ReturnZone(zone_);
- zone_ = NULL;
+ if (zone_ != nullptr) zone_pool_->ReturnZone(zone_);
+ zone_ = nullptr;
}
private:
@@ -37,7 +38,7 @@
DISALLOW_COPY_AND_ASSIGN(Scope);
};
- class StatsScope FINAL {
+ class StatsScope final {
public:
explicit StatsScope(ZonePool* zone_pool);
~StatsScope();
@@ -60,7 +61,7 @@
DISALLOW_COPY_AND_ASSIGN(StatsScope);
};
- explicit ZonePool(Isolate* isolate);
+ ZonePool();
~ZonePool();
size_t GetMaxAllocatedBytes();
@@ -76,7 +77,6 @@
typedef std::vector<Zone*> Used;
typedef std::vector<StatsScope*> Stats;
- Isolate* const isolate_;
Unused unused_;
Used used_;
Stats stats_;