// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STORE_BUFFER_H_
#define V8_STORE_BUFFER_H_

#include "src/allocation.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

class Page;
class PagedSpace;
class StoreBuffer;

// Callback invoked for each old-to-new slot; |from| is the slot's address,
// |to| the new-space object it points at.
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
 public:
  explicit StoreBuffer(Heap* heap);

  // Overflow entry point, reached via the isolate rather than an instance.
  // NOTE(review): implementation not visible in this header — see
  // store-buffer.cc for the exact overflow handling.
  static void StoreBufferOverflow(Isolate* isolate);

  // Set up / release the buffer's backing store (presumably the
  // virtual_memory_ / old_virtual_memory_ reservations below — confirm in
  // store-buffer.cc).
  void SetUp();
  void TearDown();

  // This is used to add addresses to the store buffer non-concurrently.
  inline void Mark(Address addr);

  // This is used to add addresses to the store buffer when multiple threads
  // may operate on the store buffer.
  inline void MarkSynchronized(Address addr);

  // This is used by the heap traversal to enter the addresses into the store
  // buffer that should still be in the store buffer after GC.  It enters
  // addresses directly into the old buffer because the GC starts by wiping the
  // old buffer and thereafter only visits each cell once so there is no need
  // to attempt to remove any dupes.  During the first part of a GC we
  // are using the store buffer to access the old spaces and at the same time
  // we are rebuilding the store buffer using this function.  There is, however
  // no issue of overwriting the buffer we are iterating over, because this
  // stage of the scavenge can only reduce the number of addresses in the store
  // buffer (some objects are promoted so pointers to them do not need to be in
  // the store buffer).  The later parts of the GC scan the pages that are
  // exempt from the store buffer and process the promotion queue.  These steps
  // can overflow this buffer.  We check for this and on overflow we call the
  // callback set up with the StoreBufferRebuildScope object.
  inline void EnterDirectlyIntoStoreBuffer(Address addr);

  // Iterates over all pointers that go from old space to new space.  It will
  // delete the store buffer as it starts so the callback should reenter
  // surviving old-to-new pointers into the store buffer to rebuild it.
  void IteratePointersToNewSpace(ObjectSlotCallback callback);

  // Sizing constants.  The new buffer holds kStoreBufferSize bytes
  // (kStoreBufferLength Address-sized slots); the old buffer is 16x longer.
  // kStoreBufferSize is deliberately equal to the overflow bit's value.
  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
  static const int kStoreBufferSize = kStoreBufferOverflowBit;
  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
  static const int kHashSetLengthLog2 = 12;
  static const int kHashSetLength = 1 << kHashSetLengthLog2;

  // Moves entries from the new buffer into the old buffer (see the member
  // comment below on the new/old split).
  void Compact();

  // GC lifecycle hooks.
  void GCPrologue();
  void GCEpilogue();

  // The old buffer viewed as an array of slots (each slot holds an Object**).
  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
  // Rewinds/advances the old buffer's insertion point; must stay within
  // [Start(), Limit()].
  void SetTop(Object*** top) {
    DCHECK(top >= Start());
    DCHECK(top <= Limit());
    old_top_ = reinterpret_cast<Address*>(top);
  }

  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }

  // Makes sure at least |space_needed| bytes are available in the old buffer,
  // e.g. by compacting or exempting pages (see ExemptPopularPages below).
  void EnsureSpace(intptr_t space_needed);
  void Verify();

  bool PrepareForIteration();

  // Removes entries on pages whose MemoryChunk flags match |flag|.
  // NOTE(review): semantics of |flag| inferred from the name — confirm
  // against the definition in store-buffer.cc.
  void Filter(int flag);

  // Eliminates all stale store buffer entries from the store buffer, i.e.,
  // slots that are not part of live objects anymore.  This method must be
  // called after marking, when the whole transitive closure is known and
  // must be called before sweeping when mark bits are still intact.
  void ClearInvalidStoreBufferEntries();
  void VerifyValidStoreBufferEntries();

 private:
  Heap* heap_;

  // The store buffer is divided up into a new buffer that is constantly being
  // filled by mutator activity and an old buffer that is filled with the data
  // from the new buffer after compression.
  Address* start_;
  Address* limit_;

  // Bounds and insertion point of the old buffer; old_reserved_limit_ marks
  // the end of the reserved (not necessarily committed) region.
  Address* old_start_;
  Address* old_limit_;
  Address* old_top_;
  Address* old_reserved_limit_;
  base::VirtualMemory* old_virtual_memory_;

  bool old_buffer_is_sorted_;
  bool old_buffer_is_filtered_;
  bool during_gc_;
  // The garbage collector iterates over many pointers to new space that are not
  // handled by the store buffer.  This flag indicates whether the pointers
  // found by the callbacks should be added to the store buffer or not.
  bool store_buffer_rebuilding_enabled_;
  // Overflow callback installed by StoreBufferRebuildScope (a friend, below).
  StoreBufferCallback callback_;
  // Cleared by DontMoveStoreBufferEntriesScope to pin entries in place.
  bool may_move_store_buffer_entries_;

  // Backing memory for the new buffer.
  base::VirtualMemory* virtual_memory_;

  // Two hash sets used for filtering.
  // If address is in the hash set then it is guaranteed to be in the
  // old part of the store buffer.
  uintptr_t* hash_set_1_;
  uintptr_t* hash_set_2_;
  bool hash_sets_are_empty_;

  // Used for synchronization of concurrent store buffer access.
  base::Mutex mutex_;

  void ClearFilteringHashSets();

  bool SpaceAvailable(intptr_t space_needed);
  void ExemptPopularPages(int prime_sample_step, int threshold);

  void ProcessOldToNewSlot(Address slot_address,
                           ObjectSlotCallback slot_callback);

  void FindPointersToNewSpaceInRegion(Address start, Address end,
                                      ObjectSlotCallback slot_callback);

  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);

#ifdef VERIFY_HEAP
  void VerifyPointers(LargeObjectSpace* space);
#endif

  friend class DontMoveStoreBufferEntriesScope;
  friend class FindPointersToNewSpaceVisitor;
  friend class StoreBufferRebuildScope;
};


| 159 | class StoreBufferRebuilder { |
| 160 | public: |
| 161 | explicit StoreBufferRebuilder(StoreBuffer* store_buffer) |
| 162 | : store_buffer_(store_buffer) {} |
| 163 | |
| 164 | void Callback(MemoryChunk* page, StoreBufferEvent event); |
| 165 | |
| 166 | private: |
| 167 | StoreBuffer* store_buffer_; |
| 168 | |
| 169 | // We record in this variable how full the store buffer was when we started |
| 170 | // iterating over the current page, finding pointers to new space. If the |
| 171 | // store buffer overflows again we can exempt the page from the store buffer |
| 172 | // by rewinding to this point instead of having to search the store buffer. |
| 173 | Object*** start_of_current_page_; |
| 174 | // The current page we are scanning in the store buffer iterator. |
| 175 | MemoryChunk* current_page_; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 176 | }; |


| 179 | class StoreBufferRebuildScope { |
| 180 | public: |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 181 | explicit StoreBufferRebuildScope(Heap* heap, StoreBuffer* store_buffer, |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 182 | StoreBufferCallback callback) |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 183 | : store_buffer_(store_buffer), |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 184 | stored_state_(store_buffer->store_buffer_rebuilding_enabled_), |
| 185 | stored_callback_(store_buffer->callback_) { |
| 186 | store_buffer_->store_buffer_rebuilding_enabled_ = true; |
| 187 | store_buffer_->callback_ = callback; |
| 188 | (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent); |
| 189 | } |
| 190 | |
| 191 | ~StoreBufferRebuildScope() { |
| 192 | store_buffer_->callback_ = stored_callback_; |
| 193 | store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 194 | } |
| 195 | |
| 196 | private: |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 197 | StoreBuffer* store_buffer_; |
| 198 | bool stored_state_; |
| 199 | StoreBufferCallback stored_callback_; |
| 200 | }; |


| 203 | class DontMoveStoreBufferEntriesScope { |
| 204 | public: |
| 205 | explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer) |
| 206 | : store_buffer_(store_buffer), |
| 207 | stored_state_(store_buffer->may_move_store_buffer_entries_) { |
| 208 | store_buffer_->may_move_store_buffer_entries_ = false; |
| 209 | } |
| 210 | |
| 211 | ~DontMoveStoreBufferEntriesScope() { |
| 212 | store_buffer_->may_move_store_buffer_entries_ = stored_state_; |
| 213 | } |
| 214 | |
| 215 | private: |
| 216 | StoreBuffer* store_buffer_; |
| 217 | bool stored_state_; |
| 218 | }; |
}  // namespace internal
}  // namespace v8

#endif  // V8_STORE_BUFFER_H_