/*
 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actual address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
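
  // For orientation, a minimal sketch (hypothetical names: assumes some
  // memory node 'n' is in scope) of how the fixed input slots above are
  // read back off a node:
  //
  //   Node* c = n->in(MemNode::Control);  // NULL if the access is unpinned
  //   Node* m = n->in(MemNode::Memory);   // memory state consumed
  //   Node* a = n->in(MemNode::Address);  // full address, derived from base
  //   Node* v = n->is_Store() ? n->in(MemNode::ValueIn) : NULL;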
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
    : MemNode(c,mem,adr,at), _type(rt) {
    init_class_id(Class_Load);
  }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                     const TypePtr* at, const Type *rt, BasicType bt );

  virtual uint hash() const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t; // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
};
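
// A minimal usage sketch of the factory above (hypothetical context: a
// PhaseGVN 'gvn' plus control/memory/address nodes 'ctl', 'mem', 'adr' for
// an int field with address type 'at'); make() selects the concrete
// subclass from the BasicType:
//
//   Node* ld = LoadNode::make(gvn, ctl, mem, adr, at, TypeInt::INT, T_INT);
//   ld = gvn.transform(ld);  // may fold the load against a prior store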

//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
    : LoadNode(c, mem, adr, at, ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadUI2LNode-----------------------------------
// Load an unsigned integer into long from memory
class LoadUI2LNode : public LoadNode {
public:
  LoadUI2LNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeLong* t = TypeLong::UINT)
    : LoadNode(c, mem, adr, at, t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
    : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
             const TypeLong *tl = TypeLong::LONG,
             bool require_atomic_access = false )
    : LoadNode(c,mem,adr,at,tl)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
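
// A minimal sketch (hypothetical context: Compile* C plus the usual
// gvn/ctl/mem/adr) of requesting an indivisible 64-bit load, e.g. for a
// volatile long on a 32-bit platform where a plain load could tear:
//
//   LoadLNode* ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type,
//                                          TypeLong::LONG);
//   Node* val = gvn.transform(ld);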

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadLNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
public:
  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadDNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
    : LoadPNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk )
    : LoadNNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
    : MemNode(c,mem,adr,at,val) {
    init_class_id(Class_Store);
  }
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
    : MemNode(c,mem,adr,at,val,oop_store) {
    init_class_id(Class_Store);
  }

  // Polymorphic factory method:
  static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                          const TypePtr* at, Node *val, BasicType bt );

  virtual uint hash() const;  // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};
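
// A minimal store-side sketch mirroring the LoadNode::make example above
// (same hypothetical gvn/ctl/mem/adr context; 'val' is the value to store):
//
//   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, at, val, T_INT);
//   Node* new_mem = gvn.transform(st);  // the store is the next memory state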

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
              bool require_atomic_access = false )
    : StoreNode(c,mem,adr,at,val)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
public:
  StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  int _oop_alias_idx;   // The alias_idx of OopStore
public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) : StoreNode(c,mem,adr,at,val,oop_store), _oop_alias_idx(oop_alias_idx) {}
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode( Node *c, Node *mem, Node *adr )
    : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------LoadLLockedNode---------------------------------
// Load-locked a long from memory (either object or array).
// On Sparc & Intel this is implemented as a normal long load.
class LoadLLockedNode : public LoadLNode {
public:
  LoadLLockedNode( Node *c, Node *mem, Node *adr )
    : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StoreLConditional; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};


//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
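
// All four CompareAndSwap flavors share the LoadStoreNode layout: the compare
// value rides in the extra ExpectedIn slot and the node's TypeInt::BOOL
// result reports success.  A minimal sketch (hypothetical 'oldval'/'newval'
// nodes; allocation idiom as used elsewhere in C2):
//
//   Node* cas = gvn.transform(new (C, 5) CompareAndSwapINode(ctl, mem, adr,
//                                                            newval, oldval));
//   Node* proj = gvn.transform(new (C, 1) SCMemProjNode(cas));  // new memory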

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
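
// A minimal sketch of the zeroing helpers above (hypothetical context:
// 'rawmem' is the raw memory state, 'obj' the newly allocated raw oop, and
// the byte offsets are constants aligned as documented):
//
//   Node* zmem = ClearArrayNode::clear_memory(ctl, rawmem, obj,
//                                             header_size,  // mod BytesPerInt
//                                             end_offset,   // mod BytesPerLong
//                                             &gvn);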

//------------------------------StrComp-------------------------------------
class StrCompNode: public Node {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1,
              Node* s2, Node* c2): Node(control, char_array_mem,
                                        s1, c1,
                                        s2, c2) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public Node {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c): Node(control, char_array_mem,
                                                   s1, s2, c) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public Node {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
                 Node* s1, Node* c1,
                 Node* s2, Node* c2): Node(control, char_array_mem,
                                           s1, c1,
                                           s2, c2) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public Node {
public:
  AryEqNode(Node* control, Node* char_array_mem,
            Node* s1, Node* s2): Node(control, char_array_mem, s1, s2) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};
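
// A minimal sketch of the factory above, building a wide acquire barrier and
// wiring its control and memory inputs (hypothetical 'ctl'/'mem' nodes; the
// Op_MemBar* opcode constants select the flavor):
//
//   MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire);
//   mb->init_req(TypeFunc::Control, ctl);
//   mb->init_req(TypeFunc::Memory,  mem);
//   Node* membar = gvn.transform(mb);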

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load or FastLock.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store or FastUnLock.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  bool _is_complete;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, bool st_is_pinned, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
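
// A minimal sketch of the store-capture protocol above (hypothetical
// context: 'init' is the InitializeNode of a fresh allocation and 'st' a
// StoreNode writing into the new object):
//
//   intptr_t off = init->can_capture_store(st, &gvn);
//   if (off != 0) {
//     // Reformat 'st' so it writes the allocation's raw memory directly.
//     Node* captured = init->capture_store(st, off, &gvn);
//   }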

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
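
// A minimal sketch of rewriting one alias slice of a memory state with the
// sparse accessors above (hypothetical 'mem', 'alias_idx', and 'new_slice'):
//
//   MergeMemNode* mms = MergeMemNode::make(C, mem);
//   Node* old_slice = mms->memory_at(alias_idx);  // current state of slice
//   mms->set_memory_at(alias_idx, new_slice);     // splice in the new state
//   Node* new_mem = gvn.transform(mms);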

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;      // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;  // is_empty() == true
      }
    }
    return false;
  }
};
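
// Expanding the "expected usages" comment above: a minimal sketch of walking
// every non-empty slice of a MergeMem 'mm' (hypothetical node in scope):
//
//   for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
//     int            idx   = mms.alias_idx();  // alias class of this slice
//     Node*          slice = mms.memory();     // memory state for the slice
//     const TypePtr* tp    = mms.adr_type();   // address type it covers
//     // ...inspect, or replace via mms.set_memory(new_slice)...
//   }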

//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};