6711100: 64bit fastdebug server vm crashes with assert(_base == Int,"Not an Int")
Insert CastII nodes to narrow type of load_array_length() node
Reviewed-by: never, kvn
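
For illustration, a hypothetical Java shape of the kind this change affects
(the method and variable names are invented, not taken from the bug report):
once a fresh array allocation has succeeded, its length is known to lie in
[0, max_array_length], and the control-dependent CastII inserted by
make_ideal_length() records that narrowed type for later uses.

    // Hypothetical example: without the CastII, the type of a.length below
    // is simply that of n (any int), even though a successful allocation
    // has proven it non-negative.
    static int fill(int n) {
        int[] a = new int[n];                 // AllocateArrayNode; throws if n < 0
        for (int i = 0; i < a.length; i++) {  // load_array_length(): in [0, max_array_length]
            a[i] = i;                         // range check can use the narrowed type
        }
        return a.length;
    }
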
diff --git a/hotspot/src/share/vm/opto/callnode.cpp b/hotspot/src/share/vm/opto/callnode.cpp
index 2e54ce7..8e8fc24 100644
--- a/hotspot/src/share/vm/opto/callnode.cpp
+++ b/hotspot/src/share/vm/opto/callnode.cpp
@@ -1034,6 +1034,39 @@
//=============================================================================
uint AllocateArrayNode::size_of() const { return sizeof(*this); }
+// Retrieve the length from the AllocateArrayNode. Narrow the type with a
+// CastII, if appropriate. If we are not allowed to create new nodes, and
+// a CastII is appropriate, return NULL.
+Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
+ Node *length = in(AllocateNode::ALength);
+ assert(length != NULL, "length is not null");
+
+ const TypeInt* length_type = phase->find_int_type(length);
+ const TypeAryPtr* ary_type = oop_type->isa_aryptr();
+
+ if (ary_type != NULL && length_type != NULL) {
+ const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
+ if (narrow_length_type != length_type) {
+ // Assert one of:
+ // - the narrow_length is 0
+ // - the narrow_length is not wider than length
+ assert(narrow_length_type == TypeInt::ZERO ||
+ (narrow_length_type->_hi <= length_type->_hi &&
+ narrow_length_type->_lo >= length_type->_lo),
+ "narrow type must be narrower than length type");
+
+ // Return NULL if new nodes are not allowed
+ if (!allow_new_nodes) return NULL;
+ // Create a cast which is control dependent on the initialization to
+ // propagate the fact that the array length must be positive.
+ length = new (phase->C, 2) CastIINode(length, narrow_length_type);
+ length->set_req(0, initialization()->proj_out(0));
+ }
+ }
+
+ return length;
+}
+
//=============================================================================
uint LockNode::size_of() const { return sizeof(*this); }
diff --git a/hotspot/src/share/vm/opto/callnode.hpp b/hotspot/src/share/vm/opto/callnode.hpp
index 0f9b26e..20192dd 100644
--- a/hotspot/src/share/vm/opto/callnode.hpp
+++ b/hotspot/src/share/vm/opto/callnode.hpp
@@ -755,6 +755,15 @@
virtual int Opcode() const;
virtual uint size_of() const; // Size is bigger
+ // Dig the length operand out of an array allocation site.
+ Node* Ideal_length() {
+ return in(AllocateNode::ALength);
+ }
+
+ // Dig the length operand out of an array allocation site and narrow the
+ // type with a CastII, if necessary.
+ Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);
+
// Pattern-match a possible usage of AllocateArrayNode.
// Return null if no allocation is recognized.
static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
@@ -762,12 +771,6 @@
return (allo == NULL || !allo->is_AllocateArray())
? NULL : allo->as_AllocateArray();
}
-
- // Dig the length operand out of a (possible) array allocation site.
- static Node* Ideal_length(Node* ptr, PhaseTransform* phase) {
- AllocateArrayNode* allo = Ideal_array_allocation(ptr, phase);
- return (allo == NULL) ? NULL : allo->in(AllocateNode::ALength);
- }
};
//------------------------------AbstractLockNode-----------------------------------
diff --git a/hotspot/src/share/vm/opto/graphKit.cpp b/hotspot/src/share/vm/opto/graphKit.cpp
index 2fcb90f..78c7d97 100644
--- a/hotspot/src/share/vm/opto/graphKit.cpp
+++ b/hotspot/src/share/vm/opto/graphKit.cpp
@@ -1049,10 +1049,19 @@
//-------------------------load_array_length-----------------------------------
Node* GraphKit::load_array_length(Node* array) {
// Special-case a fresh allocation to avoid building nodes:
- Node* alen = AllocateArrayNode::Ideal_length(array, &_gvn);
- if (alen != NULL) return alen;
- Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
- return _gvn.transform( new (C, 3) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
+ AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
+ Node *alen;
+ if (alloc == NULL) {
+ Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
+ alen = _gvn.transform( new (C, 3) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
+ } else {
+ alen = alloc->Ideal_length();
+ Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_aryptr(), &_gvn);
+ if (ccast != alen) {
+ alen = _gvn.transform(ccast);
+ }
+ }
+ return alen;
}
//------------------------------do_null_check----------------------------------
@@ -2833,20 +2842,18 @@
assert(just_allocated_object(control()) == javaoop, "just allocated");
#ifdef ASSERT
- { // Verify that the AllocateNode::Ideal_foo recognizers work:
- Node* kn = alloc->in(AllocateNode::KlassNode);
- Node* ln = alloc->in(AllocateNode::ALength);
- assert(AllocateNode::Ideal_klass(rawoop, &_gvn) == kn,
- "Ideal_klass works");
- assert(AllocateNode::Ideal_klass(javaoop, &_gvn) == kn,
- "Ideal_klass works");
+ { // Verify that the AllocateNode::Ideal_allocation recognizers work:
+ assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc,
+ "Ideal_allocation works");
+ assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc,
+ "Ideal_allocation works");
if (alloc->is_AllocateArray()) {
- assert(AllocateArrayNode::Ideal_length(rawoop, &_gvn) == ln,
- "Ideal_length works");
- assert(AllocateArrayNode::Ideal_length(javaoop, &_gvn) == ln,
- "Ideal_length works");
+ assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(),
+ "Ideal_allocation works");
+ assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(),
+ "Ideal_allocation works");
} else {
- assert(ln->is_top(), "no length, please");
+ assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
}
}
#endif //ASSERT
@@ -3095,25 +3102,20 @@
// (This happens via a non-constant argument to inline_native_newArray.)
// In any case, the value of klass_node provides the desired array type.
const TypeInt* length_type = _gvn.find_int_type(length);
- const TypeInt* narrow_length_type = NULL;
const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
if (ary_type->isa_aryptr() && length_type != NULL) {
// Try to get a better type than POS for the size
ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
- narrow_length_type = ary_type->is_aryptr()->size();
- if (narrow_length_type == length_type)
- narrow_length_type = NULL;
}
Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only);
- // Cast length on remaining path to be positive:
- if (narrow_length_type != NULL) {
- Node* ccast = new (C, 2) CastIINode(length, narrow_length_type);
- ccast->set_req(0, control());
- _gvn.set_type_bottom(ccast);
- record_for_igvn(ccast);
- if (map()->find_edge(length) >= 0) {
+ // Cast length on remaining path to be as narrow as possible
+ if (map()->find_edge(length) >= 0) {
+ Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
+ if (ccast != length) {
+ _gvn.set_type_bottom(ccast);
+ record_for_igvn(ccast);
replace_in_map(length, ccast);
}
}
diff --git a/hotspot/src/share/vm/opto/memnode.cpp b/hotspot/src/share/vm/opto/memnode.cpp
index 56f7ef7..ea7e062 100644
--- a/hotspot/src/share/vm/opto/memnode.cpp
+++ b/hotspot/src/share/vm/opto/memnode.cpp
@@ -1887,6 +1887,38 @@
return tap->size();
}
+//-------------------------------Ideal---------------------------------------
+// Feed through the length in AllocateArray(...length...)._length.
+Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+ Node* p = MemNode::Ideal_common(phase, can_reshape);
+ if (p) return (p == NodeSentinel) ? NULL : p;
+
+ // Take apart the address into an oop and an offset.
+ // Return 'this' if we cannot.
+ Node* adr = in(MemNode::Address);
+ intptr_t offset = 0;
+ Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
+ if (base == NULL) return NULL;
+ const TypeAryPtr* tary = phase->type(adr)->isa_aryptr();
+ if (tary == NULL) return NULL;
+
+ // We can fetch the length directly through an AllocateArrayNode.
+ // This works even if the length is not constant (clone or newArray).
+ if (offset == arrayOopDesc::length_offset_in_bytes()) {
+ AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
+ if (alloc != NULL) {
+ Node* allocated_length = alloc->Ideal_length();
+ Node* len = alloc->make_ideal_length(tary, phase);
+ if (allocated_length != len) {
+ // New CastII improves on this.
+ return len;
+ }
+ }
+ }
+
+ return NULL;
+}
+
//------------------------------Identity---------------------------------------
// Feed through the length in AllocateArray(...length...)._length.
Node* LoadRangeNode::Identity( PhaseTransform *phase ) {
@@ -1905,15 +1937,22 @@
// We can fetch the length directly through an AllocateArrayNode.
// This works even if the length is not constant (clone or newArray).
if (offset == arrayOopDesc::length_offset_in_bytes()) {
- Node* allocated_length = AllocateArrayNode::Ideal_length(base, phase);
- if (allocated_length != NULL) {
- return allocated_length;
+ AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
+ if (alloc != NULL) {
+ Node* allocated_length = alloc->Ideal_length();
+ // Do not allow make_ideal_length to allocate a CastII node.
+ Node* len = alloc->make_ideal_length(tary, phase, false);
+ if (allocated_length == len) {
+ // Return allocated_length only if it would not be improved by a CastII.
+ return allocated_length;
+ }
}
}
return this;
}
+
//=============================================================================
//---------------------------StoreNode::make-----------------------------------
// Polymorphic factory method:
diff --git a/hotspot/src/share/vm/opto/memnode.hpp b/hotspot/src/share/vm/opto/memnode.hpp
index dff9dad..2b40a67 100644
--- a/hotspot/src/share/vm/opto/memnode.hpp
+++ b/hotspot/src/share/vm/opto/memnode.hpp
@@ -241,6 +241,7 @@
virtual int Opcode() const;
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Identity( PhaseTransform *phase );
+ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};
//------------------------------LoadLNode--------------------------------------
diff --git a/hotspot/src/share/vm/opto/parse2.cpp b/hotspot/src/share/vm/opto/parse2.cpp
index cc1d6b3..0f40fdd 100644
--- a/hotspot/src/share/vm/opto/parse2.cpp
+++ b/hotspot/src/share/vm/opto/parse2.cpp
@@ -100,16 +100,17 @@
// Do the range check
if (GenerateRangeChecks && need_range_check) {
- // Range is constant in array-oop, so we can use the original state of mem
- Node* len = load_array_length(ary);
Node* tst;
if (sizetype->_hi <= 0) {
- // If the greatest array bound is negative, we can conclude that we're
+ // The greatest array bound is negative, so we can conclude that we're
// compiling unreachable code, but the unsigned compare trick used below
// only works with non-negative lengths. Instead, hack "tst" to be zero so
// the uncommon_trap path will always be taken.
tst = _gvn.intcon(0);
} else {
+ // Range is constant in array-oop, so we can use the original state of mem
+ Node* len = load_array_length(ary);
+
// Test length vs index (standard trick using unsigned compare)
Node* chk = _gvn.transform( new (C, 3) CmpUNode(idx, len) );
BoolTest::mask btest = BoolTest::lt;
@@ -137,9 +138,12 @@
// Check for always knowing you are throwing a range-check exception
if (stopped()) return top();
- Node* ptr = array_element_address( ary, idx, type, sizetype);
+ Node* ptr = array_element_address(ary, idx, type, sizetype);
if (result2 != NULL) *result2 = elemtype;
+
+ assert(ptr != top(), "top should go hand-in-hand with stopped");
+
return ptr;
}
diff --git a/hotspot/src/share/vm/opto/type.cpp b/hotspot/src/share/vm/opto/type.cpp
index 243b44c..3e9b66b 100644
--- a/hotspot/src/share/vm/opto/type.cpp
+++ b/hotspot/src/share/vm/opto/type.cpp
@@ -3157,17 +3157,18 @@
// Narrow the given size type to the index range for the given array base type.
-// Return NULL if the resulting int type becomes empty.
+// Return TypeInt::ZERO if the resulting int type becomes empty.
-const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size, BasicType elem) {
+const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size) const {
jint hi = size->_hi;
jint lo = size->_lo;
jint min_lo = 0;
- jint max_hi = max_array_length(elem);
+ jint max_hi = max_array_length(elem()->basic_type());
//if (index_not_size) --max_hi; // type of a valid array index, FTR
bool chg = false;
if (lo < min_lo) { lo = min_lo; chg = true; }
if (hi > max_hi) { hi = max_hi; chg = true; }
+ // Negative length arrays will produce weird intermediate dead fast-path code
if (lo > hi)
- return NULL;
+ return TypeInt::ZERO;
if (!chg)
return size;
return TypeInt::make(lo, hi, Type::WidenMin);
@@ -3176,9 +3177,7 @@
//-------------------------------cast_to_size----------------------------------
const TypeAryPtr* TypeAryPtr::cast_to_size(const TypeInt* new_size) const {
assert(new_size != NULL, "");
- new_size = narrow_size_type(new_size, elem()->basic_type());
- if (new_size == NULL) // Negative length arrays will produce weird
- new_size = TypeInt::ZERO; // intermediate dead fast-path goo
+ new_size = narrow_size_type(new_size);
if (new_size == size()) return this;
const TypeAry* new_ary = TypeAry::make(elem(), new_size);
return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id);
diff --git a/hotspot/src/share/vm/opto/type.hpp b/hotspot/src/share/vm/opto/type.hpp
index 68366ed..69bc06a 100644
--- a/hotspot/src/share/vm/opto/type.hpp
+++ b/hotspot/src/share/vm/opto/type.hpp
@@ -840,6 +840,7 @@
virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;
virtual const TypeAryPtr* cast_to_size(const TypeInt* size) const;
+ virtual const TypeInt* narrow_size_type(const TypeInt* size) const;
virtual bool empty(void) const; // TRUE if type is vacuous
virtual const TypePtr *add_offset( intptr_t offset ) const;
@@ -865,7 +866,6 @@
}
static const TypeAryPtr *_array_body_type[T_CONFLICT+1];
// sharpen the type of an int which is used as an array size
- static const TypeInt* narrow_size_type(const TypeInt* size, BasicType elem);
#ifndef PRODUCT
virtual void dump2( Dict &d, uint depth, outputStream *st ) const; // Specialized per-Type dumping
#endif