// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_STORE_BUFFER_H_
#define V8_STORE_BUFFER_H_

#include "allocation.h"
#include "checks.h"
#include "globals.h"
#include "platform.h"
#include "v8globals.h"

namespace v8 {
namespace internal {

class Page;
class PagedSpace;
class StoreBuffer;

typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

typedef void (StoreBuffer::*RegionCallback)(Address start,
                                            Address end,
                                            ObjectSlotCallback slot_callback,
                                            bool clear_maps);
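
// A hypothetical callback matching the ObjectSlotCallback signature, shown
// only to illustrate the contract: 'from' is a slot holding an old-to-new
// pointer and 'to' is the object it currently points at.  A real callback
// (e.g. the scavenger's) may promote 'to' and write the forwarded address
// back through the slot:
//
//   void ExampleSlotCallback(HeapObject** from, HeapObject* to) {
//     HeapObject* forwarded = to;  // ...copy or promote 'to' here...
//     *from = forwarded;           // Update the slot in place.
//   }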

// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
 public:
  explicit StoreBuffer(Heap* heap);

  static void StoreBufferOverflow(Isolate* isolate);

  inline Address TopAddress();

  void SetUp();
  void TearDown();

  // This is used by the mutator to enter addresses into the store buffer.
  inline void Mark(Address addr);
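
  // A rough sketch of the mutator-side write barrier that feeds Mark (the
  // names are illustrative rather than the exact V8 code): after storing
  // 'value' into the field at address 'slot', a slot outside new space that
  // now points into new space is recorded:
  //
  //   if (heap->InNewSpace(value) && !heap->InNewSpace(slot)) {
  //     store_buffer->Mark(slot);  // Remember the slot's address.
  //   }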

  // This is used by the heap traversal to enter the addresses into the store
  // buffer that should still be in the store buffer after GC.  It enters
  // addresses directly into the old buffer because the GC starts by wiping
  // the old buffer and thereafter only visits each cell once, so there is no
  // need to attempt to remove any dupes.  During the first part of a GC we
  // are using the store buffer to access the old spaces and at the same time
  // we are rebuilding the store buffer using this function.  There is,
  // however, no issue of overwriting the buffer we are iterating over,
  // because this stage of the scavenge can only reduce the number of
  // addresses in the store buffer (some objects are promoted so pointers to
  // them do not need to be in the store buffer).  The later parts of the GC
  // scan the pages that are exempt from the store buffer and process the
  // promotion queue.  These steps can overflow this buffer.  We check for
  // this and on overflow we call the callback set up with the
  // StoreBufferRebuildScope object.
  inline void EnterDirectlyIntoStoreBuffer(Address addr);

  // Iterates over all pointers that go from old space to new space.  It
  // empties the store buffer as it starts out, so the callback should
  // re-enter surviving old-to-new pointers into the store buffer to rebuild
  // it.
  void IteratePointersToNewSpace(ObjectSlotCallback callback);

  // Same as IteratePointersToNewSpace but additionally clears maps in
  // objects referenced from the store buffer that do not contain a
  // forwarding pointer.
  void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback);

  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
  static const int kStoreBufferSize = kStoreBufferOverflowBit;
  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
  static const int kHashSetLengthLog2 = 12;
  static const int kHashSetLength = 1 << kHashSetLengthLog2;
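
  // For orientation (assuming a 64-bit build, where kPointerSizeLog2 == 3):
  // kStoreBufferSize is 1 << 17 bytes = 128 KB, so the new buffer holds
  // kStoreBufferLength = 128 KB / 8 = 16384 addresses and the old buffer
  // holds kOldStoreBufferLength = 16384 * 16 = 262144 addresses (2 MB).
  // Each filtering hash set has 1 << 12 = 4096 entries.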

  void Compact();

  void GCPrologue();
  void GCEpilogue();

  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
  void SetTop(Object*** top) {
    ASSERT(top >= Start());
    ASSERT(top <= Limit());
    old_top_ = reinterpret_cast<Address*>(top);
  }

  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }

  // Goes through the store buffer removing pointers to things that have
  // been promoted.  Rebuilds the store buffer completely if it overflowed.
  void SortUniq();

  void EnsureSpace(intptr_t space_needed);
  void Verify();

  bool PrepareForIteration();

#ifdef DEBUG
  void Clean();
  // Slow, for asserts only.
  bool CellIsInStoreBuffer(Address cell);
#endif

  void Filter(int flag);

 private:
  Heap* heap_;

  // The store buffer is divided up into a new buffer that is constantly
  // being filled by mutator activity and an old buffer that is filled with
  // the data from the new buffer after compression.
  Address* start_;
  Address* limit_;

  Address* old_start_;
  Address* old_limit_;
  Address* old_top_;
  Address* old_reserved_limit_;
  VirtualMemory* old_virtual_memory_;

  bool old_buffer_is_sorted_;
  bool old_buffer_is_filtered_;
  bool during_gc_;
  // The garbage collector iterates over many pointers to new space that are
  // not handled by the store buffer.  This flag indicates whether the
  // pointers found by the callbacks should be added to the store buffer or
  // not.
  bool store_buffer_rebuilding_enabled_;
  StoreBufferCallback callback_;
  bool may_move_store_buffer_entries_;

  VirtualMemory* virtual_memory_;

  // Two hash sets used for filtering.
  // If an address is in a hash set then it is guaranteed to be in the
  // old part of the store buffer.
  uintptr_t* hash_set_1_;
  uintptr_t* hash_set_2_;
  bool hash_sets_are_empty_;
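
  // A simplified sketch of how the sets are assumed to be used when
  // compacting the new buffer into the old buffer (the exact hash functions
  // live in store-buffer.cc): each address is hashed into a slot of each
  // set, and an address already present in either set is a duplicate that
  // can be dropped instead of being copied into the old buffer:
  //
  //   uintptr_t key = reinterpret_cast<uintptr_t>(addr) >> kPointerSizeLog2;
  //   size_t slot = Hash(key) & (kHashSetLength - 1);  // Illustrative hash.
  //   if (hash_set_1_[slot] == key) continue;  // Seen recently; skip it.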

  void ClearFilteringHashSets();

  bool SpaceAvailable(intptr_t space_needed);
  void Uniq();
  void ExemptPopularPages(int prime_sample_step, int threshold);

  // Set the map field of the object to NULL if it contains a map.
  inline void ClearDeadObject(HeapObject* object);

  void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps);

  void FindPointersToNewSpaceInRegion(Address start,
                                      Address end,
                                      ObjectSlotCallback slot_callback,
                                      bool clear_maps);

  // For each region of pointers on a page in use from an old space, calls
  // the given region_callback, which in turn applies slot_callback to each
  // slot that holds a pointer into new space.
  void IteratePointersOnPage(
      PagedSpace* space,
      Page* page,
      RegionCallback region_callback,
      ObjectSlotCallback slot_callback);

  void FindPointersToNewSpaceInMaps(
      Address start,
      Address end,
      ObjectSlotCallback slot_callback,
      bool clear_maps);

  void FindPointersToNewSpaceInMapsRegion(
      Address start,
      Address end,
      ObjectSlotCallback slot_callback,
      bool clear_maps);

  void FindPointersToNewSpaceOnPage(
      PagedSpace* space,
      Page* page,
      RegionCallback region_callback,
      ObjectSlotCallback slot_callback,
      bool clear_maps);

  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
                                    bool clear_maps);

#ifdef VERIFY_HEAP
  void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
  void VerifyPointers(LargeObjectSpace* space);
#endif

  friend class StoreBufferRebuildScope;
  friend class DontMoveStoreBufferEntriesScope;
};


class StoreBufferRebuildScope {
 public:
  explicit StoreBufferRebuildScope(Heap* heap,
                                   StoreBuffer* store_buffer,
                                   StoreBufferCallback callback)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
        stored_callback_(store_buffer->callback_) {
    store_buffer_->store_buffer_rebuilding_enabled_ = true;
    store_buffer_->callback_ = callback;
    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
  }

  ~StoreBufferRebuildScope() {
    store_buffer_->callback_ = stored_callback_;
    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
  StoreBufferCallback stored_callback_;
};
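
// A minimal usage sketch (a hypothetical call site; MyRebuildCallback and
// UpdateSlot are illustrative names, not V8 functions): while the scope is
// live, pointers found by the GC callbacks are re-entered into the store
// buffer, and an overflow is reported through MyRebuildCallback:
//
//   {
//     StoreBufferRebuildScope scope(heap, store_buffer, &MyRebuildCallback);
//     store_buffer->IteratePointersToNewSpace(&UpdateSlot);
//   }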


class DontMoveStoreBufferEntriesScope {
 public:
  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer),
        stored_state_(store_buffer->may_move_store_buffer_entries_) {
    store_buffer_->may_move_store_buffer_entries_ = false;
  }

  ~DontMoveStoreBufferEntriesScope() {
    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
  }

 private:
  StoreBuffer* store_buffer_;
  bool stored_state_;
};
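
// Usage mirrors StoreBufferRebuildScope: wrapping a block in this scope
// clears may_move_store_buffer_entries_, signalling that existing entries
// must not be moved or compacted away while the scope is live, e.g.
// (hypothetical):
//
//   {
//     DontMoveStoreBufferEntriesScope scope(store_buffer);
//     // Code here can hold pointers into the buffer safely.
//   }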

} }  // namespace v8::internal

#endif  // V8_STORE_BUFFER_H_