// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "store-buffer.h"

#include <algorithm>

#include "v8.h"
#include "store-buffer-inl.h"
#include "v8-counters.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      during_gc_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_set_1_(NULL),
      hash_set_2_(NULL),
      hash_sets_are_empty_(true) {
}


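// Sets up the small "new" store buffer, whose top pointer is exposed to
// generated code via public_set_store_buffer_top, and the much larger "old"
// buffer into which it is periodically compacted. The reservation is
// 3 * kStoreBufferSize so that a kStoreBufferSize-sized buffer aligned on a
// 2 * kStoreBufferSize boundary always fits inside it; with that alignment
// the address just past the buffer (limit_) has kStoreBufferOverflowBit set
// while addresses inside the buffer do not (see the ASSERTs below), so an
// overflow can be detected from the top pointer alone. The old buffer
// initially commits only one OS page; EnsureSpace grows it on demand up to
// old_reserved_limit_.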
void StoreBuffer::SetUp() {
  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);

  old_virtual_memory_ =
      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // We don't know the exact alignment guarantee of the OS allocation, but it
  // is certainly at least 4K (0x1000), so the low 12 bits of old_start_ must
  // be zero.
  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
  ASSERT(initial_length > 0);
  ASSERT(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

  CHECK(old_virtual_memory_->Commit(
            reinterpret_cast<void*>(old_start_),
            (old_limit_ - old_start_) * kPointerSize,
            false));

  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  ASSERT(start_ <= vm_limit);
  ASSERT(limit_ <= vm_limit);
  USE(vm_limit);
  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.
  heap_->public_set_store_buffer_top(start_);

  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();
}


void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}


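// Slow-path entry point used when the (new) store buffer is full: compacts it
// into the old buffer and counts the overflow.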
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
  isolate->counters()->store_buffer_overflows()->Increment();
}


void StoreBuffer::Uniq() {
  // Remove adjacent duplicates and cells that do not point at new space.
  Address previous = NULL;
  Address* write = old_start_;
  ASSERT(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}


bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
  return old_limit_ - old_top_ >= space_needed;
}


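// Makes sure the old buffer can accept at least space_needed more entries.
// Strategies are tried in order of increasing cost: grow the committed part
// of the old buffer (doubling it) up to the reserved limit, compact and drop
// entries on pages already marked scan-on-scavenge, and finally exempt the
// most popular pages via sampling (ExemptPopularPages) until enough room is
// free.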
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize,
                                      false));
    old_limit_ += grow;
  }

  if (SpaceAvailable(space_needed)) return;

  if (old_buffer_is_filtered_) return;
  ASSERT(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  if (SpaceAvailable(space_needed)) return;

  // First sample 1 entry in 97 and exempt (mark as scan-on-scavenge) the
  // pages where we estimate that more than 1 in 8 pointers are to new space.
  // If that does not free enough space, retry with progressively finer
  // sampling and stricter thresholds; the last pass exempts every page that
  // still has an entry in the buffer.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
    { 1, 0}
  };
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort we mark all pages as being exempt from the store buffer.
    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
    if (SpaceAvailable(space_needed)) return;
  }
  UNREACHABLE();
}


// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter == threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}


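// Removes every old-buffer entry that points into a memory chunk with the
// given flag set; such pages (scan-on-scavenge) are scanned in their entirety
// instead of via individual store buffer entries.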
void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


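// Compacts the new buffer into the old one, then sorts the old buffer and
// removes adjacent duplicates as well as entries that no longer point at new
// space (see Uniq).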
void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
  std::sort(old_start_, old_top_);
  Uniq();

  old_buffer_is_sorted_ = true;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


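// Prepares for IteratePointersToNewSpace: compacts the new buffer and, if any
// page is marked scan-on-scavenge, drops the entries on such pages since
// those pages will be scanned in full anyway. Returns whether any such page
// exists.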
bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Filtering hash sets are inconsistent with the store buffer after
  // iteration.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}


#ifdef DEBUG
void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  Uniq();  // Also removes things that no longer point to new space.
  EnsureSpace(kStoreBufferSize / 2);
}


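// Debug check used by slow asserts: answers whether the given slot address is
// currently recorded in either the new or the old store buffer, using a
// linear search with a one-element cache. When --enable-slow-asserts is off
// it conservatively answers true.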
static Address* in_store_buffer_1_element_cache = NULL;


bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}
#endif


void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}


void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
  during_gc_ = true;
}


#ifdef VERIFY_HEAP
static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
  // Do nothing.
}


void StoreBuffer::VerifyPointers(PagedSpace* space,
                                 RegionCallback region_callback) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* page = it.next();
    FindPointersToNewSpaceOnPage(
        reinterpret_cast<PagedSpace*>(page->owner()),
        page,
        region_callback,
        &DummyScavengePointer);
  }
}


void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC, the Heap::InNewSpace() predicate
        // checks that pointers which satisfy the predicate point into
        // the active semispace.
        heap_->InNewSpace(*slot);
        slot_address += kPointerSize;
      }
    }
  }
}
#endif


void StoreBuffer::Verify() {
#ifdef VERIFY_HEAP
  VerifyPointers(heap_->old_pointer_space(),
                 &StoreBuffer::FindPointersToNewSpaceInRegion);
  VerifyPointers(heap_->map_space(),
                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
  VerifyPointers(heap_->lo_space());
#endif
}


void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


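// Scans every pointer-sized slot in [start, end). Each slot holding a
// new-space pointer is reported to the callback, and if the slot still points
// into new space afterwards it is re-entered directly into the old store
// buffer.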
void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start, Address end, ObjectSlotCallback slot_callback) {
  for (Address slot_address = start;
       slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap_->InNewSpace(*slot)) {
      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
      ASSERT(object->IsHeapObject());
      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
      if (heap_->InNewSpace(*slot)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}


// Compute start address of the first map following given addr.
static inline Address MapStartAlign(Address addr) {
  Address page = Page::FromAddress(addr)->area_start();
  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}


// Compute end address of the first map preceding given addr.
static inline Address MapEndAlign(Address addr) {
  Address page = Page::FromAllocationTop(addr)->area_start();
  return page + ((addr - page) / Map::kSize * Map::kSize);
}


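// Maps are allocated back to back in map space, and only the pointer-field
// range of each map can hold heap pointers, so the region is walked in
// Map::kSize steps and only [kPointerFieldsBeginOffset,
// kPointerFieldsEndOffset) of each map is scanned.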
void StoreBuffer::FindPointersToNewSpaceInMaps(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  while (map_address < end) {
    ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
    ASSERT(Memory::Object_at(map_address)->IsMap());

    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;

    FindPointersToNewSpaceInRegion(pointer_fields_start,
                                   pointer_fields_end,
                                   slot_callback);
    map_address += Map::kSize;
  }
}


void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  ASSERT(map_aligned_start == start);
  ASSERT(map_aligned_end == end);

  FindPointersToNewSpaceInMaps(map_aligned_start,
                               map_aligned_end,
                               slot_callback);
}


// This function iterates over all the pointers in a paged space in the heap,
// looking for pointers into new space. Within the pages there may be dead
// objects that have not been overwritten by free spaces or fillers because of
// lazy sweeping. These dead objects may not contain pointers to new space.
// The garbage areas that have been swept properly (these will normally be the
// large ones) will be marked with free space and filler map words. In
// addition any area that has never been used at all for object allocation must
// be marked with a free space or filler. Because the free space and filler
// maps do not move we can always recognize these even after a compaction.
// Normal objects like FixedArrays and JSObjects should not contain references
// to these maps. The special garbage section (see comment in spaces.h) is
// skipped since it can contain absolutely anything. Any objects that are
// allocated during iteration may or may not be visited by the iteration, but
// they will not be partially visited.
void StoreBuffer::FindPointersToNewSpaceOnPage(
    PagedSpace* space,
    Page* page,
    RegionCallback region_callback,
    ObjectSlotCallback slot_callback) {
  Address visitable_start = page->area_start();
  Address end_of_page = page->area_end();

  Address visitable_end = visitable_start;

  Object* free_space_map = heap_->free_space_map();
  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();

  while (visitable_end < end_of_page) {
    Object* o = *reinterpret_cast<Object**>(visitable_end);
    // Skip fillers but not things that look like fillers in the special
    // garbage section which can contain anything.
    if (o == free_space_map ||
        o == two_pointer_filler_map ||
        (visitable_end == space->top() && visitable_end != space->limit())) {
      if (visitable_start != visitable_end) {
        // After calling this the special garbage section may have moved.
        (this->*region_callback)(visitable_start,
                                 visitable_end,
                                 slot_callback);
        if (visitable_end >= space->top() && visitable_end < space->limit()) {
          visitable_end = space->limit();
          visitable_start = visitable_end;
          continue;
        }
      }
      if (visitable_end == space->top() && visitable_end != space->limit()) {
        visitable_start = visitable_end = space->limit();
      } else {
        // At this point we are either at the start of a filler or we are at
        // the point where the space->top() used to be before the
        // visit_pointer_region call above. Either way we can skip the
        // object at the current spot: We don't promise to visit objects
        // allocated during heap traversal, and if space->top() moved then it
        // must be because an object was allocated at this point.
        visitable_start =
            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
        visitable_end = visitable_start;
      }
    } else {
      ASSERT(o != free_space_map);
      ASSERT(o != two_pointer_filler_map);
      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
      visitable_end += kPointerSize;
    }
  }
  ASSERT(visitable_end == end_of_page);
  if (visitable_start != visitable_end) {
    (this->*region_callback)(visitable_start,
                             visitable_end,
                             slot_callback);
  }
}


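// Replays the old buffer: the buffer is logically emptied (old_top_ is reset)
// and every recorded slot that still holds a from-space pointer is passed to
// the callback; slots that still point into new space afterwards are
// re-entered behind the read position. Entries may not be moved while this is
// in progress (DontMoveStoreBufferEntriesScope), and each iteration re-adds
// at most one entry (see the ASSERT on saved_top), so the write position can
// never overtake the read position.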
void StoreBuffer::IteratePointersInStoreBuffer(
    ObjectSlotCallback slot_callback) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = *slot;
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        if (heap_->InNewSpace(*slot)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  // We do not sort or remove duplicated entries from the store buffer because
  // we expect that the callback will rebuild the store buffer, thus removing
  // all duplicates and pointers to old space.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from slot address
  // because slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer. We must scan them now. As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer. If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer. If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          ASSERT(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          FindPointersToNewSpaceOnPage(
              owner,
              page,
              (owner == heap_->map_space() ?
                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
                 &StoreBuffer::FindPointersToNewSpaceInRegion),
              slot_callback);
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}


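// Moves the contents of the new store buffer into the old buffer. Two small
// hash sets, keyed on the page-relative part of each slot address, are used
// to cheaply discard most duplicates on the way: an entry is skipped only
// when the exact same (shifted) address is already recorded, so the filtering
// is lossy with respect to duplicates but never drops a new address.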
void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  ASSERT(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  ASSERT(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates. In the interest of speed this is a lossy operation. Some
  // duplicates will remain. We have two hash sets with different hash
  // functions to reduce the number of unnecessary clashes.
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    ASSERT(!heap_->cell_space()->Contains(*current));
    ASSERT(!heap_->code_space()->Contains(*current));
    ASSERT(!heap_->old_data_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    // The upper part of an address is basically random because of ASLR and OS
    // non-determinism, so we use only the bits within a page for hashing to
    // make v8's behavior (more) deterministic.
    uintptr_t hash_addr =
        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                 (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries. This will
      // cause some duplicates to remain undetected.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    ASSERT(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}

} }  // namespace v8::internal