// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "store-buffer.h"
#include "store-buffer-inl.h"
#include "v8-counters.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      during_gc_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_map_1_(NULL),
      hash_map_2_(NULL) {
}


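// Set up the two backing stores: the small buffer that new-space pointers are
// recorded into, and the larger "old" buffer that Compact() spills into.  The
// small buffer is aligned so that its one-past-the-end address has
// kStoreBufferOverflowBit set (see the ASSERTs below), which is how an
// overflowing write can be detected cheaply.  Only part of the old buffer's
// reservation is committed up front; EnsureSpace() commits more on demand.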
void StoreBuffer::Setup() {
  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);

  old_virtual_memory_ =
      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // Don't know the alignment requirements of the OS, but it is certainly not
  // less than 0xfff.
  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
  ASSERT(initial_length > 0);
  ASSERT(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

  CHECK(old_virtual_memory_->Commit(
      reinterpret_cast<void*>(old_start_),
      (old_limit_ - old_start_) * kPointerSize,
      false));

  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  ASSERT(start_ <= vm_limit);
  ASSERT(limit_ <= vm_limit);
  USE(vm_limit);
  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.
  heap_->public_set_store_buffer_top(start_);

  hash_map_1_ = new uintptr_t[kHashMapLength];
  hash_map_2_ = new uintptr_t[kHashMapLength];

  ZapHashTables();
}


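// Release the reserved memory and the hash sets and reset all pointers; the
// inverse of Setup().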
void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_map_1_;
  delete[] hash_map_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}


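// Called when the store buffer top reaches its limit; compacting the entries
// into the old buffer makes room for further recordings.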
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
}


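// qsort() comparator for the addresses in the old store buffer.  On 64-bit
// targets the pointer difference may not fit in an int, so the result is
// computed with explicit comparisons there.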
#if V8_TARGET_ARCH_X64
static int CompareAddresses(const void* void_a, const void* void_b) {
  intptr_t a =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
  intptr_t b =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
  // Unfortunately if int is smaller than intptr_t there is no branch-free
  // way to return a number with the same sign as the difference between the
  // pointers.
  if (a == b) return 0;
  if (a < b) return -1;
  ASSERT(a > b);
  return 1;
}
#else
static int CompareAddresses(const void* void_a, const void* void_b) {
  intptr_t a =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
  intptr_t b =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
  ASSERT(sizeof(1) == sizeof(a));
  // Shift down to avoid wraparound.
  return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
}
#endif


void StoreBuffer::Uniq() {
  ASSERT(HashTablesAreZapped());
  // Remove adjacent duplicates and cells that do not point at new space.
  Address previous = NULL;
  Address* write = old_start_;
  ASSERT(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}


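// Make sure the old buffer has room for at least space_needed more entries:
// first by committing more of the reserved region, then by compacting and
// filtering, and finally by exempting the most popular pages from the store
// buffer altogether (marking them scan-on-scavenge).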
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize,
                                      false));
    old_limit_ += grow;
  }

  if (old_limit_ - old_top_ >= space_needed) return;

  if (old_buffer_is_filtered_) return;
  ASSERT(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // If filtering out the entries from scan_on_scavenge pages got us down to
  // less than half full, then we are satisfied with that.
  if (old_limit_ - old_top_ > old_top_ - old_start_) return;

  // Sample 1 entry in 97 and filter out the pages where we estimate that more
  // than 1 in 8 pointers are to new space.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
    { 1, 0}
  };
  for (int i = kSampleFinenesses - 1; i >= 0; i--) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort we mark all pages as being exempt from the store
    // buffer.
    ASSERT(i != 0 || old_top_ == old_start_);
    if (old_limit_ - old_top_ > old_top_ - old_start_) return;
  }
  UNREACHABLE();
}


// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter == threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}


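// Remove from the old buffer every entry that lies on a page with the given
// flag set (for example SCAN_ON_SCAVENGE pages, which will be scanned in
// their entirety anyway).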
void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;
}


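// Compact, then sort the old buffer and drop adjacent duplicates so that it
// can be iterated efficiently.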
void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
  ZapHashTables();
  qsort(reinterpret_cast<void*>(old_start_),
        old_top_ - old_start_,
        sizeof(*old_top_),
        &CompareAddresses);
  Uniq();

  old_buffer_is_sorted_ = true;
}


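// Compact and, if any pages are marked scan-on-scavenge, filter their entries
// out of the old buffer.  Returns true if such pages exist and therefore have
// to be scanned separately.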
bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  ZapHashTables();
  return page_has_scan_on_scavenge_flag;
}


#ifdef DEBUG
void StoreBuffer::Clean() {
  ZapHashTables();
  Uniq();  // Also removes things that no longer point to new space.
  CheckForFullBuffer();
}


static bool Zapped(char* start, int size) {
  for (int i = 0; i < size; i++) {
    if (start[i] != 0) return false;
  }
  return true;
}


bool StoreBuffer::HashTablesAreZapped() {
  return Zapped(reinterpret_cast<char*>(hash_map_1_),
                sizeof(uintptr_t) * kHashMapLength) &&
         Zapped(reinterpret_cast<char*>(hash_map_2_),
                sizeof(uintptr_t) * kHashMapLength);
}


static Address* in_store_buffer_1_element_cache = NULL;


bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}
#endif


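// Clear the two hash sets used by Compact() for approximate duplicate
// elimination.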
void StoreBuffer::ZapHashTables() {
  memset(reinterpret_cast<void*>(hash_map_1_),
         0,
         sizeof(uintptr_t) * kHashMapLength);
  memset(reinterpret_cast<void*>(hash_map_2_),
         0,
         sizeof(uintptr_t) * kHashMapLength);
}


void StoreBuffer::GCPrologue() {
  ZapHashTables();
  during_gc_ = true;
}


#ifdef DEBUG
static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
  // Do nothing.
}


void StoreBuffer::VerifyPointers(PagedSpace* space,
                                 RegionCallback region_callback) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* page = it.next();
    FindPointersToNewSpaceOnPage(
        reinterpret_cast<PagedSpace*>(page->owner()),
        page,
        region_callback,
        &DummyScavengePointer);
  }
}


void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy predicate point into
        // the active semispace.
        heap_->InNewSpace(*slot);
        slot_address += kPointerSize;
      }
    }
  }
}
#endif


void StoreBuffer::Verify() {
#ifdef DEBUG
  VerifyPointers(heap_->old_pointer_space(),
                 &StoreBuffer::FindPointersToNewSpaceInRegion);
  VerifyPointers(heap_->map_space(),
                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
  VerifyPointers(heap_->lo_space());
#endif
}


void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
  if (FLAG_verify_heap) {
    Verify();
  }
}


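// Scan a contiguous region of slots, invoking the callback on every slot that
// holds a new-space pointer and re-recording the slot in the store buffer if
// it still points to new space afterwards.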
void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start, Address end, ObjectSlotCallback slot_callback) {
  for (Address slot_address = start;
       slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap_->InNewSpace(*slot)) {
      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
      ASSERT(object->IsHeapObject());
      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
      if (heap_->InNewSpace(*slot)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}


// Compute start address of the first map following given addr.
static inline Address MapStartAlign(Address addr) {
  Address page = Page::FromAddress(addr)->ObjectAreaStart();
  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}


// Compute end address of the first map preceding given addr.
static inline Address MapEndAlign(Address addr) {
  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
  return page + ((addr - page) / Map::kSize * Map::kSize);
}


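// Walk the maps in a map-aligned range, scanning only the pointer-field
// portion of each map for new-space pointers.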
void StoreBuffer::FindPointersToNewSpaceInMaps(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  while (map_address < end) {
    ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
    ASSERT(Memory::Object_at(map_address)->IsMap());

    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;

    FindPointersToNewSpaceInRegion(pointer_fields_start,
                                   pointer_fields_end,
                                   slot_callback);
    map_address += Map::kSize;
  }
}


void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  ASSERT(map_aligned_start == start);
  ASSERT(map_aligned_end == end);

  FindPointersToNewSpaceInMaps(map_aligned_start,
                               map_aligned_end,
                               slot_callback);
}


// This function iterates over all the pointers in a paged space in the heap,
// looking for pointers into new space. Within the pages there may be dead
// objects that have not been overwritten by free spaces or fillers because of
// lazy sweeping. These dead objects may not contain pointers to new space.
// The garbage areas that have been swept properly (these will normally be the
// large ones) will be marked with free space and filler map words. In
// addition any area that has never been used at all for object allocation must
// be marked with a free space or filler. Because the free space and filler
// maps do not move we can always recognize these even after a compaction.
// Normal objects like FixedArrays and JSObjects should not contain references
// to these maps. The special garbage section (see comment in spaces.h) is
// skipped since it can contain absolutely anything. Any objects that are
// allocated during iteration may or may not be visited by the iteration, but
// they will not be partially visited.
void StoreBuffer::FindPointersToNewSpaceOnPage(
    PagedSpace* space,
    Page* page,
    RegionCallback region_callback,
    ObjectSlotCallback slot_callback) {
  Address visitable_start = page->ObjectAreaStart();
  Address end_of_page = page->ObjectAreaEnd();

  Address visitable_end = visitable_start;

  Object* free_space_map = heap_->free_space_map();
  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();

  while (visitable_end < end_of_page) {
    Object* o = *reinterpret_cast<Object**>(visitable_end);
    // Skip fillers but not things that look like fillers in the special
    // garbage section which can contain anything.
    if (o == free_space_map ||
        o == two_pointer_filler_map ||
        (visitable_end == space->top() && visitable_end != space->limit())) {
      if (visitable_start != visitable_end) {
        // After calling this the special garbage section may have moved.
        (this->*region_callback)(visitable_start,
                                 visitable_end,
                                 slot_callback);
        if (visitable_end >= space->top() && visitable_end < space->limit()) {
          visitable_end = space->limit();
          visitable_start = visitable_end;
          continue;
        }
      }
      if (visitable_end == space->top() && visitable_end != space->limit()) {
        visitable_start = visitable_end = space->limit();
      } else {
        // At this point we are either at the start of a filler or we are at
        // the point where the space->top() used to be before the
        // visit_pointer_region call above. Either way we can skip the
        // object at the current spot: We don't promise to visit objects
        // allocated during heap traversal, and if space->top() moved then it
        // must be because an object was allocated at this point.
        visitable_start =
            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
        visitable_end = visitable_start;
      }
    } else {
      ASSERT(o != free_space_map);
      ASSERT(o != two_pointer_filler_map);
      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
      visitable_end += kPointerSize;
    }
  }
  ASSERT(visitable_end == end_of_page);
  if (visitable_start != visitable_end) {
    (this->*region_callback)(visitable_start,
                             visitable_end,
                             slot_callback);
  }
}


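// Process every entry currently in the old buffer. The buffer is emptied
// first; the callback may cause surviving entries to be re-added, so existing
// entries must not be moved while this runs.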
void StoreBuffer::IteratePointersInStoreBuffer(
    ObjectSlotCallback slot_callback) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = *slot;
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        if (heap_->InNewSpace(*slot)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  // We do not sort or remove duplicated entries from the store buffer because
  // we expect that callback will rebuild the store buffer thus removing
  // all duplicates and pointers to old space.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from slot address
  // because slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer. We must scan them now. As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer. If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer. If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          ASSERT(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          FindPointersToNewSpaceOnPage(
              owner,
              page,
              (owner == heap_->map_space() ?
                   &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
                   &StoreBuffer::FindPointersToNewSpaceInRegion),
              slot_callback);
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}


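// Move the entries accumulated in the small new-space buffer into the old
// buffer, discarding most duplicates along the way.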
void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  ASSERT(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  ASSERT(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates. In the interest of speed this is a lossy operation. Some
  // duplicates will remain. We have two hash tables with different hash
  // functions to reduce the number of unnecessary clashes.
  for (Address* current = start_; current < top; current++) {
    ASSERT(!heap_->cell_space()->Contains(*current));
    ASSERT(!heap_->code_space()->Contains(*current));
    ASSERT(!heap_->old_data_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    int hash1 =
        ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
    if (hash_map_1_[hash1] == int_addr) continue;
    int hash2 =
        ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
    hash2 ^= hash2 >> (kHashMapLengthLog2 * 2);
    if (hash_map_2_[hash2] == int_addr) continue;
    if (hash_map_1_[hash1] == 0) {
      hash_map_1_[hash1] = int_addr;
    } else if (hash_map_2_[hash2] == 0) {
      hash_map_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries. This will
      // cause some duplicates to remain undetected.
      hash_map_1_[hash1] = int_addr;
      hash_map_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    ASSERT(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
  CheckForFullBuffer();
}


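// Make sure the old buffer has enough headroom to absorb the next full store
// buffer before another compaction is needed.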
void StoreBuffer::CheckForFullBuffer() {
  EnsureSpace(kStoreBufferSize * 2);
}

} }  // namespace v8::internal