/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).
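//
// Each entry in the byte map corresponds to one card of card_size heap
// bytes; byte_for(addr) locates a card's entry by shifting the address
// right by card_shift and adding it to the biased byte_map_base set up in
// initialize() below.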

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
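  // The byte map needs _guard_index + 1 entries, the last of which is the
  // guard card, rounded up to the granularity at which it can be reserved.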
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL)
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be less than or equal to 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

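  // If _page_size is just the default OS page size, a zero rs_align lets
  // ReservedSpace choose its own alignment; otherwise the reservation must
  // be aligned to at least the page size and the allocation granularity.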
  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
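  //
  // Note that byte_map_base may therefore point outside the reserved byte
  // map; it is only used after adding (addr >> card_shift) for an address
  // within the covered heap, which lands back inside _byte_map.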
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

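  // The last entry of the byte map is a guard card. The page holding it is
  // committed eagerly and the card is set to last_card, so verify_guard()
  // can detect a card update that strays past the covered range.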
  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: ");
  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

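// Returns the part of the committed region for covered region "self" that is
// not shared with the committed region of any other covered region, and that
// excludes the guard page.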
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
237 // as "ind".
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
      // Use new_end_aligned (as opposed to new_end_for_commit) because
      // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
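// (A precise barrier dirties exactly the card containing the updated field;
// an imprecise barrier may dirty only the card containing the start of the
// object being written to.)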

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
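  // byte_after(mr.start() - 1) is the first card whose covered range begins
  // at or after mr.start(), so a card only partially covered at the start of
  // mr is left untouched.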
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

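// Returns the heap range covered by the first maximal run of dirty cards in
// "mr" (intersected with the covered regions), optionally resetting those
// cards to "reset_val". If no dirty card is found, an empty region at
// mr.end() is returned.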
MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

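// The strictest alignment the card table can ask of heap boundaries: one OS
// page of card table entries covers card_size * vm_page_size() heap bytes,
// so boundaries aligned this way map to whole pages of the byte map.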
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start = byte_for(mr.start());
  jbyte* end   = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        log_error(gc, verify)("== %sexpecting value: %d", (val_equals) ? "" : "not ", val);
        failures = true;
      }
      log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
                            p2i(curr), p2i(addr_for(curr)),
                            p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                            (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}