/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP

#include "memory/referencePolicy.hpp"
#include "oops/instanceRefKlass.hpp"

// ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally. Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop(o);
    } else {
      _oop_head = o;
    }
  }
  bool   is_empty() const       { return head() == NULL; }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len; }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
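
// Example (a sketch of single-threaded discovery bookkeeping; the real
// code paths live in referenceProcessor.cpp):
//
//   DiscoveredList& list = *get_discovered_list(rt);
//   // chain obj through its discovered field to the current head, then:
//   list.set_head(obj);       // encodes compressed vs. uncompressed head
//   list.inc_length(1);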

// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t _processed;
  size_t _removed;
  )

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive):
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
  { }

  // End Of List.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list.
  void remove();

  // Make the Reference object active again.
  void make_active();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};
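
// Example (a sketch of the iteration pattern the processing phases use;
// compare pp2_work() in referenceProcessor.cpp):
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // The referent is reachable after all: unlink the Reference,
//       // keep its referent pointer up to date, and move on.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }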

class ReferenceProcessor : public CHeapObj<mtGC> {
 protected:
  // Compatibility with pre-4965777 JDKs
  static bool _pending_list_uses_discovered_field;

  // The SoftReference master timestamp clock
  static jlong _soft_ref_timestamp_clock;

  MemRegion   _span;                 // (right-open) interval of heap
                                     // subject to wkref discovery

  bool        _discovering_refs;     // true when discovery enabled
  bool        _discovery_is_atomic;  // if discovery is atomic wrt
                                     // other collectors in configuration
  bool        _discovery_is_mt;      // true if reference discovery is MT.

  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s).  (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;

  BarrierSet* _bs;                   // Cached copy of BarrierSet.
  bool        _enqueuing_is_done;    // true if all weak references enqueued
  bool        _processing_is_mt;     // true during phases when
                                     // reference processing is MT.
  uint        _next_id;              // round-robin mod _num_q counter in
                                     // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy* _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy* _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*        _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  uint _num_q;
  // The maximum MT'ness degree of the queues below
  uint _max_num_q;

  // Master array of discovered oops
  DiscoveredList* _discovered_refs;

  // Arrays of lists of oops, one per thread (pointers into master array above)
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  uint num_q()                      { return _num_q; }
  uint max_num_q()                  { return _max_num_q; }
  void set_active_mt_degree(uint v) { _num_q = v; }

  DiscoveredList* discovered_refs() { return _discovered_refs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }
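
  // Example (a sketch): a collector typically snapshots the soft-ref
  // policy once per cycle before processing, e.g.:
  //
  //   rp->setup_policy(clear_all_soft_refs);
  //   rp->process_discovered_references(&is_alive, &keep_alive,
  //                                     &complete_gc, NULL /*serial*/);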

  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&    refs_list,
                      ReferencePolicy*   policy,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector. should_unload_classes is
  // used to aid assertion checking when classes are collected.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      bool               should_unload_classes);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(uint i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // Set the 'discovered' field of the given reference to
  // the given value - emitting barriers depending upon
  // the value of _discovered_list_needs_barrier.
  void set_discovered(oop ref, oop value);

  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  uint next_id() {
    uint id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
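  // (Example: with _num_q == 3, successive next_id() calls yield
  // 0, 1, 2, 0, 1, ... so discovered references are spread across
  // the _num_q lists in round-robin order.)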
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void clear_discovered_references(DiscoveredList& refs_list);
  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discovered_refs(NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _discovered_list_needs_barrier(false),
    _bs(NULL),
    _is_alive_non_header(NULL),
    _num_q(0),
    _max_num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  { }

  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL,
                     bool discovered_list_needs_barrier = false);
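
  // Example (a sketch; argument values are illustrative and the closure
  // name is hypothetical -- real settings are collector-specific):
  //
  //   _ref_processor =
  //     new ReferenceProcessor(_span,
  //                            true,  ParallelGCThreads, // MT processing
  //                            true,  ParallelGCThreads, // MT discovery
  //                            false,                    // non-atomic (concurrent) discovery
  //                            &_is_alive_closure,       // liveness aid
  //                            true);                    // moving collector: needs barrier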

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool verify_disabled, bool check_no_refs);
  void disable_discovery() { _discovering_refs = false; }
  bool discovery_enabled() { return _discovering_refs; }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether the JDK in which we are embedded is a pre-4965777 JDK,
  // and thus whether or not it uses the discovered field to chain
  // the entries in the pending list.
  static bool pending_list_uses_discovered_field() {
    return _pending_list_uses_discovered_field;
  }

  // whether discovery is done by multiple threads same-old-timeously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done() { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f); // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  void process_discovered_references(BoolObjectClosure*           is_alive,
                                     OopClosure*                  keep_alive,
                                     VoidClosure*                 complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);

 public:
  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj)        PRODUCT_RETURN;

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};
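
// Example (a high-level sketch of one GC cycle's use of the interface
// above; the exact sequence is collector-specific):
//
//   rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
//   // ... trace the heap; the marking code hands each Reference it
//   //     encounters to rp->discover_reference(obj, ref_type) ...
//   rp->setup_policy(clear_all_soft_refs);
//   rp->process_discovered_references(&is_alive, &keep_alive,
//                                     &complete_gc, NULL /*serial*/);
//   rp->enqueue_discovered_references(NULL /*serial*/);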

// A utility class to disable reference discovery in
// the scope which contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
    }
  }
};
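
// Example (a sketch): discovery is suppressed for the dynamic extent
// of the object and the prior state restored on scope exit:
//
//   {
//     NoRefDiscovery no_discovery(rp);
//     // ... work during which no references should be discovered ...
//   }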

// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};
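
// Example (a sketch; young_gen_span is illustrative): temporarily
// restrict discovery to a subregion. The same save-and-restore pattern
// applies to the MT-discovery, is-alive, atomicity, and MT-processing
// mutators that follow:
//
//   {
//     ReferenceProcessorSpanMutator span_mutator(rp, young_gen_span);
//     // ... collect, discovering refs only within young_gen_span ...
//   }  // original span restored here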

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};


// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};


// This class is an interface used to implement task execution for the
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};
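
// Example (a sketch of a trivial executor; real executors dispatch the
// tasks to GC worker threads, and the closures here would be supplied
// by the collector):
//
//   class SerialRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
//   public:
//     virtual void execute(ProcessTask& task) {
//       task.work(0 /*work_id*/, _is_alive, _keep_alive, _complete_gc);
//     }
//     virtual void execute(EnqueueTask& task) {
//       task.work(0 /*work_id*/);
//     }
//   };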

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  int                 _n_queues;
};

#endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP