// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_STORE_BUFFER_H_
#define V8_STORE_BUFFER_H_

#include "allocation.h"
#include "checks.h"
#include "globals.h"
#include "platform.h"
#include "v8globals.h"

namespace v8 {
namespace internal {

class Page;
class PagedSpace;
class StoreBuffer;

typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

typedef void (StoreBuffer::*RegionCallback)(
    Address start, Address end, ObjectSlotCallback slot_callback);
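
// For illustration only (not part of V8): a function with the
// ObjectSlotCallback shape, where `from` is the slot that holds the
// old-to-new pointer and `to` is the object it currently points at.
//
//   static void CountOldToNewSlot(HeapObject** from, HeapObject* to) {
//     // e.g. gather statistics about old-to-new pointers.
//   }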

// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
 public:
  explicit StoreBuffer(Heap* heap);

  static void StoreBufferOverflow(Isolate* isolate);

  inline Address TopAddress();

  void SetUp();
  void TearDown();

  // This is used by the mutator to enter addresses into the store buffer.
  inline void Mark(Address addr);
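
  // A hedged sketch (hypothetical caller; the real write barrier is emitted
  // inline by V8's code generators) of how a slow path might feed Mark():
  //
  //   void RecordWriteSlow(Heap* heap, Address slot, Object* value) {
  //     if (heap->InNewSpace(value) && !heap->InNewSpace(slot)) {
  //       heap->store_buffer()->Mark(slot);  // remember this old->new slot
  //     }
  //   }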

  // This is used by the heap traversal to enter the addresses into the store
  // buffer that should still be in the store buffer after GC. It enters
  // addresses directly into the old buffer because the GC starts by wiping
  // the old buffer and thereafter only visits each cell once, so there is no
  // need to attempt to remove any duplicates. During the first part of a GC
  // we are using the store buffer to access the old spaces, and at the same
  // time we are rebuilding the store buffer using this function. There is,
  // however, no issue of overwriting the buffer we are iterating over,
  // because this stage of the scavenge can only reduce the number of
  // addresses in the store buffer (some objects are promoted, so pointers to
  // them do not need to be in the store buffer). The later parts of the GC
  // scan the pages that are exempt from the store buffer and process the
  // promotion queue. These steps can overflow this buffer. We check for this
  // and on overflow we call the callback set up with the
  // StoreBufferRebuildScope object.
  inline void EnterDirectlyIntoStoreBuffer(Address addr);

  // Iterates over all pointers that go from old space to new space. It wipes
  // the store buffer as it starts, so the callback should re-enter surviving
  // old-to-new pointers into the store buffer to rebuild it.
  void IteratePointersToNewSpace(ObjectSlotCallback callback);
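
  // A hedged sketch of the rebuild contract (hypothetical helper; a real
  // callback would obtain `heap` from context, e.g. via the isolate): slots
  // whose targets remain in new space after scavenging go back into the
  // buffer.
  //
  //   static void RescanSlot(HeapObject** slot, HeapObject* target) {
  //     if (heap->InNewSpace(target)) {
  //       heap->store_buffer()->EnterDirectlyIntoStoreBuffer(
  //           reinterpret_cast<Address>(slot));
  //     }
  //   }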

  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
  static const int kStoreBufferSize = kStoreBufferOverflowBit;
  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
  static const int kHashSetLengthLog2 = 12;
  static const int kHashSetLength = 1 << kHashSetLengthLog2;
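
  // Worked numbers as a sanity check (assuming a 64-bit build, where
  // kPointerSizeLog2 == 3): kStoreBufferSize = 1 << 17 = 128KB, so the new
  // buffer holds 128KB / 8 = 16384 addresses and the old buffer holds
  // 16 * 16384 = 262144 addresses; each hash set has 1 << 12 = 4096 slots.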

  void Compact();

  void GCPrologue();
  void GCEpilogue();

  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
  void SetTop(Object*** top) {
    ASSERT(top >= Start());
    ASSERT(top <= Limit());
    old_top_ = reinterpret_cast<Address*>(top);
  }

  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }

  // Goes through the store buffer removing pointers to things that have
  // been promoted. Rebuilds the store buffer completely if it overflowed.
  void SortUniq();

  void EnsureSpace(intptr_t space_needed);
  void Verify();

  bool PrepareForIteration();

#ifdef DEBUG
  void Clean();
  // Slow, for asserts only.
  bool CellIsInStoreBuffer(Address cell);
#endif

  void Filter(int flag);

 private:
  Heap* heap_;

  // The store buffer is divided into a new buffer that is constantly being
  // filled by mutator activity and an old buffer that is filled with the data
  // from the new buffer after compression.
  Address* start_;
  Address* limit_;

  Address* old_start_;
  Address* old_limit_;
  Address* old_top_;
  Address* old_reserved_limit_;
  VirtualMemory* old_virtual_memory_;

  bool old_buffer_is_sorted_;
  bool old_buffer_is_filtered_;
  bool during_gc_;
  // The garbage collector iterates over many pointers to new space that are
  // not handled by the store buffer. This flag indicates whether the pointers
  // found by the callbacks should be added to the store buffer or not.
  bool store_buffer_rebuilding_enabled_;
  StoreBufferCallback callback_;
  bool may_move_store_buffer_entries_;

  VirtualMemory* virtual_memory_;

  // Two hash sets used for filtering.
  // If an address is in one of the hash sets, then it is guaranteed to be in
  // the old part of the store buffer.
  uintptr_t* hash_set_1_;
  uintptr_t* hash_set_2_;
  bool hash_sets_are_empty_;

  void ClearFilteringHashSets();

  bool SpaceAvailable(intptr_t space_needed);
  void Uniq();
  void ExemptPopularPages(int prime_sample_step, int threshold);

  void FindPointersToNewSpaceInRegion(Address start,
                                      Address end,
                                      ObjectSlotCallback slot_callback);

  // For each region of pointers on a page in use from an old space, calls the
  // given region_callback, which in turn reports individual old-to-new slots
  // through slot_callback.
  void IteratePointersOnPage(
      PagedSpace* space,
      Page* page,
      RegionCallback region_callback,
      ObjectSlotCallback slot_callback);

  void FindPointersToNewSpaceInMaps(
      Address start,
      Address end,
      ObjectSlotCallback slot_callback);

  void FindPointersToNewSpaceInMapsRegion(
      Address start,
      Address end,
      ObjectSlotCallback slot_callback);

  void FindPointersToNewSpaceOnPage(
      PagedSpace* space,
      Page* page,
      RegionCallback region_callback,
      ObjectSlotCallback slot_callback);

  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);

#ifdef VERIFY_HEAP
  void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
  void VerifyPointers(LargeObjectSpace* space);
#endif

  friend class StoreBufferRebuildScope;
  friend class DontMoveStoreBufferEntriesScope;
};


class StoreBufferRebuildScope {
 public:
  explicit StoreBufferRebuildScope(Heap* heap,
                                   StoreBuffer* store_buffer,
                                   StoreBufferCallback callback)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
        stored_callback_(store_buffer->callback_) {
    store_buffer_->store_buffer_rebuilding_enabled_ = true;
    store_buffer_->callback_ = callback;
    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
  }

  ~StoreBufferRebuildScope() {
    store_buffer_->callback_ = stored_callback_;
    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
  StoreBufferCallback stored_callback_;
};
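
// A hedged usage sketch (hypothetical call site and callback name): the GC
// wraps store-buffer iteration in this scope so that overflow during
// rebuilding is routed to the given callback, and the previous callback and
// rebuild flag are restored on scope exit.
//
//   {
//     StoreBufferRebuildScope scope(heap, heap->store_buffer(),
//                                   &GCStoreBufferCallback);  // assumed name
//     heap->store_buffer()->IteratePointersToNewSpace(&RescanSlot);
//   }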


class DontMoveStoreBufferEntriesScope {
 public:
  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->may_move_store_buffer_entries_) {
    store_buffer_->may_move_store_buffer_entries_ = false;
  }

  ~DontMoveStoreBufferEntriesScope() {
    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
};
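
// A hedged sketch of why moving is suppressed (assumed call site): code that
// holds raw pointers into the old buffer, e.g. via Start()/Top(), must keep
// Compact() from reshuffling entries underneath it.
//
//   {
//     DontMoveStoreBufferEntriesScope no_move(heap->store_buffer());
//     for (Object*** p = heap->store_buffer()->Start();
//          p < heap->store_buffer()->Top(); p++) {
//       // ... inspect *p while entries are guaranteed not to move ...
//     }
//   }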

} }  // namespace v8::internal

#endif  // V8_STORE_BUFFER_H_