// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STORE_BUFFER_H_
#define V8_STORE_BUFFER_H_

#include "src/allocation.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

class Page;
class PagedSpace;
class StoreBuffer;

typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
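// A hypothetical example of a conforming ObjectSlotCallback, for illustration
// only (the name below is not part of V8): |from| is the slot in an old-space
// object holding an old-to-new pointer, and |to| is the new-space object it
// currently points at. A scavenge-time callback would typically write the new
// location of |to| back into the slot if the object has been moved, e.g.
//
//   void ExampleSlotCallback(HeapObject** from, HeapObject* to) {
//     // If |to| was evacuated or promoted, update *from to its new address.
//   }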

// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
 public:
  explicit StoreBuffer(Heap* heap);

  static void StoreBufferOverflow(Isolate* isolate);

  void SetUp();
  void TearDown();

  // This is used to add addresses to the store buffer non-concurrently.
  inline void Mark(Address addr);

  // This is used to add addresses to the store buffer when multiple threads
  // may operate on the store buffer.
  inline void MarkSynchronized(Address addr);
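  // A minimal sketch of the expected relationship between the two entry
  // points (an assumption for orientation only; the actual definitions live
  // outside this header): MarkSynchronized presumably guards the
  // unsynchronized path with |mutex_|, roughly
  //
  //   base::LockGuard<base::Mutex> lock_guard(&mutex_);
  //   Mark(addr);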

  // This is used by the heap traversal to enter the addresses into the store
  // buffer that should still be in the store buffer after GC. It enters
  // addresses directly into the old buffer because the GC starts by wiping
  // the old buffer and thereafter only visits each cell once, so there is no
  // need to attempt to remove any duplicates. During the first part of a GC
  // we are using the store buffer to access the old spaces and at the same
  // time we are rebuilding the store buffer using this function. There is,
  // however, no issue of overwriting the buffer we are iterating over,
  // because this stage of the scavenge can only reduce the number of
  // addresses in the store buffer (some objects are promoted, so pointers to
  // them do not need to be in the store buffer). The later parts of the GC
  // scan the pages that are exempt from the store buffer and process the
  // promotion queue. These steps can overflow this buffer. We check for
  // this, and on overflow we call the callback set up with the
  // StoreBufferRebuildScope object.
  inline void EnterDirectlyIntoStoreBuffer(Address addr);

  // Iterates over all pointers that go from old space to new space. It
  // empties the store buffer as it starts, so the callback should re-enter
  // surviving old-to-new pointers into the store buffer to rebuild it.
  void IteratePointersToNewSpace(ObjectSlotCallback callback);

  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
  static const int kStoreBufferSize = kStoreBufferOverflowBit;
  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
  static const int kHashSetLengthLog2 = 12;
  static const int kHashSetLength = 1 << kHashSetLengthLog2;
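  // For orientation, the arithmetic these constants encode: with 64-bit
  // pointers (kPointerSizeLog2 == 3) the new buffer is 1 << 17 = 128 KB and
  // holds 128 KB / 8 = 16384 Address entries; the old buffer holds
  // 16384 * 16 = 262144 entries; each filtering hash set has 1 << 12 = 4096
  // slots. On 32-bit targets the byte size halves to 64 KB, but the entry
  // count stays 16384 because each Address is half as wide.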

  void Compact();

  void GCPrologue();
  void GCEpilogue();

  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
  void SetTop(Object*** top) {
    DCHECK(top >= Start());
    DCHECK(top <= Limit());
    old_top_ = reinterpret_cast<Address*>(top);
  }

  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }

  void EnsureSpace(intptr_t space_needed);
  void Verify();

  bool PrepareForIteration();

  void Filter(int flag);

  // Eliminates all stale store buffer entries, i.e., slots that no longer
  // belong to live objects. This method must be called after marking, when
  // the whole transitive closure is known, and before sweeping, while the
  // mark bits are still intact.
  void ClearInvalidStoreBufferEntries();
  void VerifyValidStoreBufferEntries();

 private:
  Heap* heap_;

  // The store buffer is divided up into a new buffer that is constantly being
  // filled by mutator activity and an old buffer that is filled with the data
  // from the new buffer after compression.
  Address* start_;
  Address* limit_;

  Address* old_start_;
  Address* old_limit_;
  Address* old_top_;
  Address* old_reserved_limit_;
  base::VirtualMemory* old_virtual_memory_;

  bool old_buffer_is_sorted_;
  bool old_buffer_is_filtered_;
  bool during_gc_;
  // The garbage collector iterates over many pointers to new space that are
  // not handled by the store buffer. This flag indicates whether the pointers
  // found by the callbacks should be added to the store buffer or not.
  bool store_buffer_rebuilding_enabled_;
  StoreBufferCallback callback_;
  bool may_move_store_buffer_entries_;

  base::VirtualMemory* virtual_memory_;

  // Two hash sets used for filtering.
  // If address is in the hash set then it is guaranteed to be in the
  // old part of the store buffer.
  uintptr_t* hash_set_1_;
  uintptr_t* hash_set_2_;
  bool hash_sets_are_empty_;

  // Used for synchronization of concurrent store buffer access.
  base::Mutex mutex_;

  void ClearFilteringHashSets();

  bool SpaceAvailable(intptr_t space_needed);
  void ExemptPopularPages(int prime_sample_step, int threshold);

  void ProcessOldToNewSlot(Address slot_address,
                           ObjectSlotCallback slot_callback);

  void FindPointersToNewSpaceInRegion(Address start, Address end,
                                      ObjectSlotCallback slot_callback);

  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);

#ifdef VERIFY_HEAP
  void VerifyPointers(LargeObjectSpace* space);
#endif

  friend class DontMoveStoreBufferEntriesScope;
  friend class FindPointersToNewSpaceVisitor;
  friend class StoreBufferRebuildScope;
};


class StoreBufferRebuilder {
 public:
  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer) {}

  void Callback(MemoryChunk* page, StoreBufferEvent event);

 private:
  StoreBuffer* store_buffer_;

  // We record in this variable how full the store buffer was when we started
  // iterating over the current page, finding pointers to new space. If the
  // store buffer overflows again we can exempt the page from the store buffer
  // by rewinding to this point instead of having to search the store buffer.
  Object*** start_of_current_page_;
  // The current page we are scanning in the store buffer iterator.
  MemoryChunk* current_page_;
};


class StoreBufferRebuildScope {
 public:
  explicit StoreBufferRebuildScope(Heap* heap, StoreBuffer* store_buffer,
                                   StoreBufferCallback callback)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
        stored_callback_(store_buffer->callback_) {
    store_buffer_->store_buffer_rebuilding_enabled_ = true;
    store_buffer_->callback_ = callback;
    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
  }

  ~StoreBufferRebuildScope() {
    store_buffer_->callback_ = stored_callback_;
    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
  StoreBufferCallback stored_callback_;
};
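// A minimal sketch of how the rebuilder and this scope are typically wired
// together during a scavenge (ExampleRebuildCallback and ExampleSlotCallback
// are hypothetical names; the real glue lives in the heap implementation, not
// in this header):
//
//   void ExampleRebuildCallback(Heap* heap, MemoryChunk* page,
//                               StoreBufferEvent event) {
//     // Forward the event to a StoreBufferRebuilder owned by the heap, which
//     // records start_of_current_page_ or rewinds on overflow.
//   }
//
//   {
//     StoreBufferRebuildScope scope(heap, heap->store_buffer(),
//                                   &ExampleRebuildCallback);
//     heap->store_buffer()->IteratePointersToNewSpace(&ExampleSlotCallback);
//   }
//   // On scope exit the previous callback and rebuilding flag are restored.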


class DontMoveStoreBufferEntriesScope {
 public:
  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->may_move_store_buffer_entries_) {
    store_buffer_->may_move_store_buffer_entries_ = false;
  }

  ~DontMoveStoreBufferEntriesScope() {
    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
};
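// A usage sketch (hypothetical call site): the scope pins existing store
// buffer entries in place, e.g. while surrounding code holds raw pointers
// into the old buffer, and restores the previous setting when it ends:
//
//   {
//     DontMoveStoreBufferEntriesScope no_move(heap->store_buffer());
//     // Within this scope, compaction/filtering must not reorder or move
//     // the entries the surrounding code is iterating over.
//   }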
}  // namespace internal
}  // namespace v8

#endif  // V8_STORE_BUFFER_H_