// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/scavenger.h"

#include "src/contexts.h"
#include "src/heap/heap.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/profiler/cpu-profiler.h"

namespace v8 {
namespace internal {

enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };

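// A ScavengingVisitor is instantiated for each (MarksHandling,
// LoggingAndProfiling) combination, so the mode checks in its member
// functions are resolved at compile time. Scavenger::Initialize() at the
// bottom of this file sets up all four instantiations, and
// SelectScavengingVisitorsTable() picks the one matching the current heap
// state at the start of each scavenge.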
template <MarksHandling marks_handling,
          LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
    table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
    table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
    table_.Register(kVisitJSArrayBuffer, &EvacuateJSArrayBuffer);

    table_.Register(
        kVisitNativeContext,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            Context::kSize>);

    table_.Register(
        kVisitConsString,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            ConsString::kSize>);

    table_.Register(
        kVisitSlicedString,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            SlicedString::kSize>);

    table_.Register(
        kVisitSymbol,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            Symbol::kSize>);

    table_.Register(
        kVisitSharedFunctionInfo,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSWeakCollection,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSFunction, &EvacuateJSFunction);

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject, kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject, kVisitJSObjectGeneric>();

    table_
        .RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                 kVisitJSApiObject, kVisitJSApiObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct, kVisitStructGeneric>();
  }

  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
    return &table_;
  }

 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };

  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
    should_record = should_record || FLAG_log_gc;
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }

  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the
  // source object.
  INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
                                   HeapObject* target, int size)) {
    // If we migrate into to-space, then the to-space top pointer should be
    // right after the target object. Incorporate double alignment
    // over-allocation.
    DCHECK(!heap->InToSpace(target) ||
           target->address() + size == heap->new_space()->top() ||
           target->address() + size + kPointerSize == heap->new_space()->top());

    // Make sure that we do not overwrite the promotion queue which is at
    // the end of to-space.
    DCHECK(!heap->InToSpace(target) ||
           heap->promotion_queue()->IsBelowPromotionQueue(
               heap->new_space()->top()));

    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
      // Update NewSpace stats if necessary.
      RecordCopiedObject(heap, target);
      heap->OnMoveEvent(target, source, size);
    }

    if (marks_handling == TRANSFER_MARKS) {
      if (Marking::TransferColor(source, target)) {
        MemoryChunk::IncrementLiveBytesFromGC(target, size);
      }
    }
  }
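
  // After MigrateObject returns, the source object's map word no longer
  // holds a map: it encodes the forwarding address instead. A minimal
  // sketch of how a later visit of a slot pointing at the same object can
  // short-circuit (cf. EvacuateShortcutCandidate below; the actual fast
  // path is expected to live in src/heap/scavenger-inl.h):
  //
  //   MapWord word = object->map_word();
  //   if (word.IsForwardingAddress()) {
  //     *slot = word.ToForwardingAddress();  // Object was already copied.
  //   }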

  template <AllocationAlignment alignment>
  static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
                                         HeapObject* object, int object_size) {
    Heap* heap = map->GetHeap();

    DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
    AllocationResult allocation =
        heap->new_space()->AllocateRaw(object_size, alignment);

    HeapObject* target = NULL;  // Initialization to please compiler.
    if (allocation.To(&target)) {
      // Order is important here: Set the promotion limit before storing a
      // filler for double alignment or migrating the object. Otherwise we
      // may end up overwriting promotion queue entries when we migrate the
      // object.
      heap->promotion_queue()->SetNewLimit(heap->new_space()->top());

      MigrateObject(heap, object, target, object_size);

      // Update slot to new target.
      *slot = target;

      heap->IncrementSemiSpaceCopiedObjectSize(object_size);
      return true;
    }
    return false;
  }
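
  // A failed allocation here is not fatal: EvacuateObject() below falls
  // back to promoting the object into old space when the semi-space copy
  // fails, e.g. because to-space is fragmented or full.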


  template <ObjectContents object_contents, AllocationAlignment alignment>
  static inline bool PromoteObject(Map* map, HeapObject** slot,
                                   HeapObject* object, int object_size) {
    Heap* heap = map->GetHeap();

    AllocationResult allocation =
        heap->old_space()->AllocateRaw(object_size, alignment);

    HeapObject* target = NULL;  // Initialization to please compiler.
    if (allocation.To(&target)) {
      MigrateObject(heap, object, target, object_size);

      // Update slot to new target.
      *slot = target;

      if (object_contents == POINTER_OBJECT) {
        heap->promotion_queue()->insert(
            target, object_size,
            Marking::IsBlack(Marking::MarkBitFrom(object)));
      }
      heap->IncrementPromotedObjectsSize(object_size);
      return true;
    }
    return false;
  }


  template <ObjectContents object_contents, AllocationAlignment alignment>
  static inline void EvacuateObject(Map* map, HeapObject** slot,
                                    HeapObject* object, int object_size) {
    SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
    SLOW_DCHECK(object->Size() == object_size);
    Heap* heap = map->GetHeap();

    if (!heap->ShouldBePromoted(object->address(), object_size)) {
      // A semi-space copy may fail due to fragmentation. In that case, we
      // try to promote the object.
      if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
        return;
      }
    }

    if (PromoteObject<object_contents, alignment>(map, slot, object,
                                                  object_size)) {
      return;
    }

    // If promotion failed, we try to copy the object to the other semi-space.
    if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;

    FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
  }
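
  // Evacuation policy, in order: (1) objects not yet old enough for
  // promotion get a to-space copy; (2) old-enough objects, and young
  // objects whose to-space copy failed, are promoted into old space;
  // (3) if promotion fails, the to-space copy is retried; (4) if every
  // step fails, the process dies with an out-of-memory error.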


  static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
                                        HeapObject* object) {
    ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);

    if (marks_handling == IGNORE_MARKS) return;

    MapWord map_word = object->map_word();
    DCHECK(map_word.IsForwardingAddress());
    HeapObject* target = map_word.ToForwardingAddress();

    MarkBit mark_bit = Marking::MarkBitFrom(target);
    if (Marking::IsBlack(mark_bit)) {
      // This object is black and it might not be rescanned by the marker.
      // We should explicitly record the code entry slot for compaction
      // because promotion queue processing (IteratePromotedObjectPointers)
      // will miss it, as it is not HeapObject-tagged.
      Address code_entry_slot =
          target->address() + JSFunction::kCodeEntryOffset;
      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
      map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
          target, code_entry_slot, code);
    }
  }
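
  // Note: the code entry is a raw Address into the function's Code object,
  // not a tagged HeapObject pointer, so the generic pointer visitors cannot
  // discover it; EvacuateJSFunction therefore records the slot by hand.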


  static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
                                        HeapObject* object) {
    int length = reinterpret_cast<FixedArray*>(object)->synchronized_length();
    int object_size = FixedArray::SizeFor(length);
    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
                                                 object_size);
  }


  static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
                                              HeapObject* object) {
    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
    int object_size = FixedDoubleArray::SizeFor(length);
    EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
  }


  static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
                                             HeapObject* object) {
    int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
                                                 object_size);
  }


  static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
                                               HeapObject* object) {
    int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
    EvacuateObject<POINTER_OBJECT, kDoubleAligned>(map, slot, object,
                                                   object_size);
  }


  static inline void EvacuateJSArrayBuffer(Map* map, HeapObject** slot,
                                           HeapObject* object) {
    ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);

    Heap* heap = map->GetHeap();
    MapWord map_word = object->map_word();
    DCHECK(map_word.IsForwardingAddress());
    HeapObject* target = map_word.ToForwardingAddress();
    if (!heap->InNewSpace(target)) {
      heap->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
    }
  }
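
  // JSArrayBuffers own an off-heap backing store, so the array buffer
  // tracker has to learn about promotions; presumably a buffer left
  // untracked here would have its backing store reclaimed with the rest of
  // new space even though the promoted buffer is still alive.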


  static inline void EvacuateByteArray(Map* map, HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqOneByteString::cast(object)
                          ->SeqOneByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)
                          ->SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
  }


  static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
                                               HeapObject* object) {
    DCHECK(IsShortcutCandidate(map->instance_type()));

    Heap* heap = map->GetHeap();

    if (marks_handling == IGNORE_MARKS &&
        ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!heap->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();

        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      Scavenger::ScavengeObjectSlow(slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }

    int object_size = ConsString::kSize;
    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
                                                 object_size);
  }
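
  // Shortcutting example: a ConsString whose second part is the empty
  // string, e.g. the result of concatenating "foo" and "", is semantically
  // just its first part, so the visitor rewires *slot to the first part and
  // forwards the cons wrapper to it without copying the wrapper. This only
  // happens under IGNORE_MARKS; SelectScavengingVisitorsTable() explains
  // why shortcutting is disabled while the incremental marker is compacting.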

  template <ObjectContents object_contents>
  class ObjectEvacuationStrategy {
   public:
    template <int object_size>
    static inline void VisitSpecialized(Map* map, HeapObject** slot,
                                        HeapObject* object) {
      EvacuateObject<object_contents, kWordAligned>(map, slot, object,
                                                    object_size);
    }

    static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
      int object_size = map->instance_size();
      EvacuateObject<object_contents, kWordAligned>(map, slot, object,
                                                    object_size);
    }
  };

  static VisitorDispatchTable<ScavengingCallback> table_;
};


template <MarksHandling marks_handling,
          LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;


// static
void Scavenger::Initialize() {
  ScavengingVisitor<TRANSFER_MARKS,
                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<TRANSFER_MARKS,
                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
}
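
// Initialize() fills the static dispatch table of each of the four
// ScavengingVisitor instantiations up front, so that
// SelectScavengingVisitorsTable() only needs to copy the matching table at
// the start of a scavenge.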


// static
void Scavenger::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
  MapWord first_word = object->map_word();
  SLOW_DCHECK(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  Scavenger* scavenger = map->GetHeap()->scavenge_collector_;
  scavenger->scavenging_visitors_table_.GetVisitor(map)(map, p, object);
}
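
// ScavengeObjectSlow is only reached for objects that have not been
// evacuated yet (note the !IsForwardingAddress() assert above); the inline
// Scavenger::ScavengeObject fast path, expected in src/heap/scavenger-inl.h,
// should resolve already-forwarded objects without dispatching here.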


void Scavenger::SelectScavengingVisitorsTable() {
  bool logging_and_profiling =
      FLAG_verify_predictable || isolate()->logger()->is_logging() ||
      isolate()->cpu_profiler()->is_profiling() ||
      (isolate()->heap_profiler() != NULL &&
       isolate()->heap_profiler()->is_tracking_object_moves());

  if (!heap()->incremental_marking()->IsMarking()) {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<IGNORE_MARKS,
                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<IGNORE_MARKS,
                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }
  } else {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<TRANSFER_MARKS,
                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<TRANSFER_MARKS,
                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }

    if (heap()->incremental_marking()->IsCompacting()) {
      // When compacting, forbid short-circuiting of cons strings: the
      // scavenging code relies on the fact that a new space object cannot
      // be evacuated into an evacuation candidate, and short-circuiting
      // would violate this assumption.
      scavenging_visitors_table_.Register(
          StaticVisitorBase::kVisitShortcutCandidate,
          scavenging_visitors_table_.GetVisitorById(
              StaticVisitorBase::kVisitConsString));
    }
  }
}


Isolate* Scavenger::isolate() { return heap()->isolate(); }


void ScavengeVisitor::VisitPointer(Object** p) { ScavengePointer(p); }


void ScavengeVisitor::VisitPointers(Object** start, Object** end) {
  // Copy all HeapObject pointers in [start, end).
  for (Object** p = start; p < end; p++) ScavengePointer(p);
}


void ScavengeVisitor::ScavengePointer(Object** p) {
  Object* object = *p;
  if (!heap_->InNewSpace(object)) return;

  Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                            reinterpret_cast<HeapObject*>(object));
}

}  // namespace internal
}  // namespace v8