blob: b7656845c729a62fbd5b1a1525292bfc2dbb501f [file] [log] [blame]
/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

Per Lidén4dc240f2015-05-13 15:16:06 +020025#ifndef SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP
26#define SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP
Stefan Karlsson8006fe82010-11-23 13:22:55 -080027
Per Lidén4dc240f2015-05-13 15:16:06 +020028#include "gc/shared/gcTrace.hpp"
29#include "gc/shared/referencePolicy.hpp"
30#include "gc/shared/referenceProcessorStats.hpp"
Staffan Larsen718f3252013-06-10 11:30:51 +020031#include "memory/referenceType.hpp"
Stefan Karlsson8006fe82010-11-23 13:22:55 -080032#include "oops/instanceRefKlass.hpp"
33
Staffan Larsen718f3252013-06-10 11:30:51 +020034class GCTimer;
35
// ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally.  Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".
52
53// forward references
54class ReferencePolicy;
55class AbstractRefProcTaskExecutor;
John Cuthbertson1b62d102011-09-22 10:57:37 -070056
// List of discovered references.
// A singly-linked list of java.lang.Reference objects threaded through
// their "discovered" fields. The head is stored in either a compressed
// or an uncompressed form, selected at runtime by UseCompressedOops.
class DiscoveredList {
public:
  // An empty list: length 0 and a NULL head (in both representations).
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  // Decoded head of the list (see referenceProcessor.inline.hpp).
  inline oop head() const;
  // Raw address of the active head field, for closures that update an
  // oop (or narrowOop) field in place.
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  inline void set_head(oop o);
  inline bool is_empty() const;
  // Cached length bookkeeping; updated explicitly by callers rather than
  // derived by walking the list.
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len;  }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;        // head when oops are uncompressed
  narrowOop _compressed_head; // head when UseCompressedOops is in effect
  size_t    _len;             // number of references on the list
};
79
// Iterator for the list of discovered references.
// Walks a DiscoveredList while supporting in-place removal of the current
// element and keep-alive/clearing of its referent.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;       // the list being iterated
  HeapWord*          _prev_next;       // address of the field that links to _ref
  oop                _prev;            // reference preceding _ref on the list
  oop                _ref;             // the current reference
  HeapWord*          _discovered_addr; // address of _ref's discovered field
  oop                _next;            // next reference (value of the discovered field)
  HeapWord*          _referent_addr;   // address of _ref's referent field
  oop                _referent;        // value of _ref's referent field
  OopClosure*        _keep_alive;      // closure applied to keep a referent alive
  BoolObjectClosure* _is_alive;        // liveness predicate for referents

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t             _processed;  // statistics: references visited
  size_t             _removed;    // statistics: references removed
  )

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list
  void remove();

  // Make the referent alive.
  // Dispatches on UseCompressedOops because _referent_addr is an untyped
  // HeapWord* and the closure needs the correctly-typed field address.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  // Advance _ref to _next; a self-link (_ref == _next) terminates the list.
  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};
J. Duke81537792007-12-01 00:00:00 +0000168
class ReferenceProcessor : public CHeapObj<mtGC> {

 private:
  // Sum of the lengths of the given array of discovered lists
  // (one list per active queue).
  size_t total_count(DiscoveredList lists[]);

 protected:
  // The SoftReference master timestamp clock
  static jlong _soft_ref_timestamp_clock;

  MemRegion   _span;                 // (right-open) interval of heap
                                     // subject to wkref discovery

  bool        _discovering_refs;     // true when discovery enabled
  bool        _discovery_is_atomic;  // if discovery is atomic wrt
                                     // other collectors in configuration
  bool        _discovery_is_mt;      // true if reference discovery is MT.

  bool        _enqueuing_is_done;    // true if all weak references enqueued
  bool        _processing_is_mt;     // true during phases when
                                     // reference processing is MT.
  uint        _next_id;              // round-robin mod _num_q counter in
                                     // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy*   _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy*   _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*          _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  uint _num_q;
  // The maximum MT'ness degree of the queues below
  uint _max_num_q;

  // Master array of discovered oops
  DiscoveredList* _discovered_refs;

  // Arrays of lists of oops, one per thread (pointers into master array above)
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  // Number of weak-reference kinds with their own discovered lists
  // (soft, weak, final, phantom).
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  uint num_q()                      { return _num_q; }
  uint max_num_q()                  { return _max_num_q; }
  void set_active_mt_degree(uint v) { _num_q = v; }

  DiscoveredList* discovered_refs() { return _discovered_refs; }

  // Select and snapshot the soft-reference clearing policy for this cycle.
  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }

  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&     refs_list,
                      ReferencePolicy*    policy,
                      BoolObjectClosure*  is_alive,
                      OopClosure*         keep_alive,
                      VoidClosure*        complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      GCTimer*           gc_timer);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(uint i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (not: _not_ mode _max_num_q)
  uint next_id() {
    uint id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);

  void clear_discovered_references(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  size_t count_jni_refs();

  void log_reflist_counts(DiscoveredList ref_lists[], size_t total_count) PRODUCT_RETURN;

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool check_no_refs = true);
  void disable_discovery()  { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether discovery is done by multiple threads same-old-timeously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueueing of weak references is complete
  bool enqueuing_is_done()  { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();
  void verify_list(DiscoveredList& ref_list);

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  ReferenceProcessorStats
  process_discovered_references(BoolObjectClosure*           is_alive,
                                OopClosure*                  keep_alive,
                                VoidClosure*                 complete_gc,
                                AbstractRefProcTaskExecutor* task_executor,
                                GCTimer*                     gc_timer);

  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in process that is being superceded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj) PRODUCT_RETURN;
};
433
434// A utility class to disable reference discovery in
435// the scope which contains it, for given ReferenceProcessor.
436class NoRefDiscovery: StackObj {
437 private:
438 ReferenceProcessor* _rp;
439 bool _was_discovering_refs;
440 public:
441 NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
Pavel Tisnovskyaf254872010-09-09 05:24:11 -0700442 _was_discovering_refs = _rp->discovery_enabled();
443 if (_was_discovering_refs) {
J. Duke81537792007-12-01 00:00:00 +0000444 _rp->disable_discovery();
445 }
446 }
447
448 ~NoRefDiscovery() {
449 if (_was_discovering_refs) {
Kim Barrett093d2692014-12-17 22:32:44 -0500450 _rp->enable_discovery(false /*check_no_refs*/);
J. Duke81537792007-12-01 00:00:00 +0000451 }
452 }
453};
454
455
456// A utility class to temporarily mutate the span of the
457// given ReferenceProcessor in the scope that contains it.
458class ReferenceProcessorSpanMutator: StackObj {
459 private:
460 ReferenceProcessor* _rp;
461 MemRegion _saved_span;
462
463 public:
464 ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
465 MemRegion span):
466 _rp(rp) {
467 _saved_span = _rp->span();
468 _rp->set_span(span);
469 }
470
471 ~ReferenceProcessorSpanMutator() {
472 _rp->set_span(_saved_span);
473 }
474};
475
476// A utility class to temporarily change the MT'ness of
477// reference discovery for the given ReferenceProcessor
478// in the scope that contains it.
Y. Srinivas Ramakrishnab4b287e2011-03-17 10:32:46 -0700479class ReferenceProcessorMTDiscoveryMutator: StackObj {
J. Duke81537792007-12-01 00:00:00 +0000480 private:
481 ReferenceProcessor* _rp;
482 bool _saved_mt;
483
484 public:
Y. Srinivas Ramakrishnab4b287e2011-03-17 10:32:46 -0700485 ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
486 bool mt):
J. Duke81537792007-12-01 00:00:00 +0000487 _rp(rp) {
488 _saved_mt = _rp->discovery_is_mt();
489 _rp->set_mt_discovery(mt);
490 }
491
Y. Srinivas Ramakrishnab4b287e2011-03-17 10:32:46 -0700492 ~ReferenceProcessorMTDiscoveryMutator() {
J. Duke81537792007-12-01 00:00:00 +0000493 _rp->set_mt_discovery(_saved_mt);
494 }
495};
496
497
498// A utility class to temporarily change the disposition
499// of the "is_alive_non_header" closure field of the
500// given ReferenceProcessor in the scope that contains it.
501class ReferenceProcessorIsAliveMutator: StackObj {
502 private:
503 ReferenceProcessor* _rp;
504 BoolObjectClosure* _saved_cl;
505
506 public:
507 ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
508 BoolObjectClosure* cl):
509 _rp(rp) {
510 _saved_cl = _rp->is_alive_non_header();
511 _rp->set_is_alive_non_header(cl);
512 }
513
514 ~ReferenceProcessorIsAliveMutator() {
515 _rp->set_is_alive_non_header(_saved_cl);
516 }
517};
518
519// A utility class to temporarily change the disposition
520// of the "discovery_is_atomic" field of the
521// given ReferenceProcessor in the scope that contains it.
522class ReferenceProcessorAtomicMutator: StackObj {
523 private:
524 ReferenceProcessor* _rp;
525 bool _saved_atomic_discovery;
526
527 public:
528 ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
529 bool atomic):
530 _rp(rp) {
531 _saved_atomic_discovery = _rp->discovery_is_atomic();
532 _rp->set_atomic_discovery(atomic);
533 }
534
535 ~ReferenceProcessorAtomicMutator() {
536 _rp->set_atomic_discovery(_saved_atomic_discovery);
537 }
538};
539
540
541// A utility class to temporarily change the MT processing
542// disposition of the given ReferenceProcessor instance
543// in the scope that contains it.
544class ReferenceProcessorMTProcMutator: StackObj {
545 private:
546 ReferenceProcessor* _rp;
547 bool _saved_mt;
548
549 public:
550 ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
551 bool mt):
552 _rp(rp) {
553 _saved_mt = _rp->processing_is_mt();
554 _rp->set_mt_processing(mt);
555 }
556
557 ~ReferenceProcessorMTProcMutator() {
558 _rp->set_mt_processing(_saved_mt);
559 }
560};
561
562
// This class is an interface used to implement task execution for the
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // This is a polymorphic base class (pure virtual execute() methods);
  // a virtual destructor guarantees correct destruction of subclasses
  // should an executor ever be deleted through a base-class pointer.
  virtual ~AbstractRefProcTaskExecutor() { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};
579
// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  // Subclasses supply the processor, the array of per-queue discovered
  // lists to operate on, and whether the task's work marks oops alive.
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  // Process the list slot identified by work_id using the supplied
  // liveness predicate, keep-alive closure, and completion closure.
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};
605
// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  // Subclasses supply the processor, the per-queue discovered lists,
  // the address of the java.lang.ref pending list head, and the number
  // of queues to enqueue from.
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

public:
  // Enqueue the discovered references for the list slot identified by work_id.
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  int                 _n_queues;
};
Stefan Karlsson8006fe82010-11-23 13:22:55 -0800628
Per Lidén4dc240f2015-05-13 15:16:06 +0200629#endif // SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP