// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STORE_BUFFER_H_
#define V8_STORE_BUFFER_H_

#include "src/allocation.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

class Page;
class PagedSpace;
class StoreBuffer;

typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

typedef void (StoreBuffer::*RegionCallback)(Address start, Address end,
                                            ObjectSlotCallback slot_callback,
                                            bool clear_maps);

// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
 public:
  explicit StoreBuffer(Heap* heap);

  static void StoreBufferOverflow(Isolate* isolate);

  inline Address TopAddress();

  void SetUp();
  void TearDown();

  // This is used by the mutator to enter addresses into the store buffer.
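  // Illustrative only (hypothetical call site, not part of this header): a
  // write-barrier slow path would record the address of a slot that just
  // received a new-space pointer, roughly
  //
  //   heap->store_buffer()->Mark(reinterpret_cast<Address>(slot));
  //
  // where |slot| is a HeapObject** inside an old-space object.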
  inline void Mark(Address addr);

  // This is used by the heap traversal to enter the addresses into the store
  // buffer that should still be in the store buffer after GC. It enters
  // addresses directly into the old buffer because the GC starts by wiping the
  // old buffer and thereafter only visits each cell once, so there is no need
  // to attempt to remove any dupes. During the first part of a GC we are
  // using the store buffer to access the old spaces and at the same time we
  // are rebuilding the store buffer using this function. There is, however,
  // no issue of overwriting the buffer we are iterating over, because this
  // stage of the scavenge can only reduce the number of addresses in the
  // store buffer (some objects are promoted so pointers to them do not need
  // to be in the store buffer). The later parts of the GC scan the pages that
  // are exempt from the store buffer and process the promotion queue. These
  // steps can overflow this buffer. We check for this and on overflow we call
  // the callback set up with the StoreBufferRebuildScope object.
  inline void EnterDirectlyIntoStoreBuffer(Address addr);

  // Iterates over all pointers that go from old space to new space. It will
  // delete the store buffer as it starts so the callback should reenter
  // surviving old-to-new pointers into the store buffer to rebuild it.
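  //
  // A minimal sketch of a compatible callback shape (hypothetical, for
  // exposition only; the real callbacks live in the GC):
  //
  //   void ExampleSlotCallback(HeapObject** slot, HeapObject* target) {
  //     // Update *slot if |target| has moved, and re-enter the slot's
  //     // address (e.g. via EnterDirectlyIntoStoreBuffer) if it still
  //     // points into new space.
  //   }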
  void IteratePointersToNewSpace(ObjectSlotCallback callback);

  // Same as IteratePointersToNewSpace but additionally clears maps in objects
  // referenced from the store buffer that do not contain a forwarding pointer.
  void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback);

  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
  static const int kStoreBufferSize = kStoreBufferOverflowBit;
  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
  static const int kHashSetLengthLog2 = 12;
  static const int kHashSetLength = 1 << kHashSetLengthLog2;
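  // For illustration (derived from the constants above, assuming a 64-bit
  // build where kPointerSizeLog2 == 3): the new buffer is 1 << 17 bytes
  // (128 KB), i.e. 16384 address-sized slots; the old buffer holds 16 times
  // as many entries; and each filtering hash set has 4096 slots.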

  void Compact();

  void GCPrologue();
  void GCEpilogue();

  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
  void SetTop(Object*** top) {
    DCHECK(top >= Start());
    DCHECK(top <= Limit());
    old_top_ = reinterpret_cast<Address*>(top);
  }

  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }

  // Goes through the store buffer removing pointers to things that have
  // been promoted. Rebuilds the store buffer completely if it overflowed.
  void SortUniq();

  void EnsureSpace(intptr_t space_needed);
  void Verify();

  bool PrepareForIteration();

#ifdef DEBUG
  void Clean();
  // Slow, for asserts only.
  bool CellIsInStoreBuffer(Address cell);
#endif

  void Filter(int flag);

 private:
  Heap* heap_;

  // The store buffer is divided up into a new buffer that is constantly being
  // filled by mutator activity and an old buffer that is filled with the data
  // from the new buffer after compression.
  Address* start_;
  Address* limit_;

  Address* old_start_;
  Address* old_limit_;
  Address* old_top_;
  Address* old_reserved_limit_;
  base::VirtualMemory* old_virtual_memory_;

  bool old_buffer_is_sorted_;
  bool old_buffer_is_filtered_;
  bool during_gc_;
  // The garbage collector iterates over many pointers to new space that are
  // not handled by the store buffer. This flag indicates whether the pointers
  // found by the callbacks should be added to the store buffer or not.
  bool store_buffer_rebuilding_enabled_;
  StoreBufferCallback callback_;
  bool may_move_store_buffer_entries_;

  base::VirtualMemory* virtual_memory_;

  // Two hash sets used for filtering.
  // If an address is in the hash set, then it is guaranteed to be in the
  // old part of the store buffer.
  uintptr_t* hash_set_1_;
  uintptr_t* hash_set_2_;
  bool hash_sets_are_empty_;

  void ClearFilteringHashSets();

  bool SpaceAvailable(intptr_t space_needed);
  void Uniq();
  void ExemptPopularPages(int prime_sample_step, int threshold);

  // Sets the map field of the object to NULL if it contains a map.
  inline void ClearDeadObject(HeapObject* object);

  void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps);

  void FindPointersToNewSpaceInRegion(Address start, Address end,
                                      ObjectSlotCallback slot_callback,
                                      bool clear_maps);

  // For each region of pointers on an in-use page of an old space, calls
  // region_callback, forwarding slot_callback to it.
  void IteratePointersOnPage(PagedSpace* space, Page* page,
                             RegionCallback region_callback,
                             ObjectSlotCallback slot_callback);

  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
                                    bool clear_maps);

#ifdef VERIFY_HEAP
  void VerifyPointers(LargeObjectSpace* space);
#endif

  friend class StoreBufferRebuildScope;
  friend class DontMoveStoreBufferEntriesScope;
};


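// Enables store buffer rebuilding and installs |callback| for the duration of
// the scope: the constructor signals kStoreBufferStartScanningPagesEvent, and
// the destructor restores the previous callback and rebuilding flag.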
class StoreBufferRebuildScope {
 public:
  explicit StoreBufferRebuildScope(Heap* heap, StoreBuffer* store_buffer,
                                   StoreBufferCallback callback)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
        stored_callback_(store_buffer->callback_) {
    store_buffer_->store_buffer_rebuilding_enabled_ = true;
    store_buffer_->callback_ = callback;
    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
  }

  ~StoreBufferRebuildScope() {
    store_buffer_->callback_ = stored_callback_;
    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
  StoreBufferCallback stored_callback_;
};


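// Clears may_move_store_buffer_entries_ for the duration of the scope, so
// that store buffer entries are not moved; the previous value is restored on
// destruction.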
class DontMoveStoreBufferEntriesScope {
 public:
  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->may_move_store_buffer_entries_) {
    store_buffer_->may_move_store_buffer_entries_ = false;
  }

  ~DontMoveStoreBufferEntriesScope() {
    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
};
}
}  // namespace v8::internal

#endif  // V8_STORE_BUFFER_H_