// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/scavenger.h"

#include "src/contexts.h"
#include "src/heap/heap.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/isolate.h"
#include "src/log.h"

namespace v8 {
namespace internal {

enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };

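// Each template instantiation selects one statically registered dispatch
// table of evacuation callbacks: marks handling and promotion mode depend on
// whether incremental marking is active, and the logging/profiling mode
// controls whether object moves are reported (see
// Scavenger::SelectScavengingVisitorsTable below).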
template <MarksHandling marks_handling, PromotionMode promotion_mode,
          LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
    table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
    table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
    table_.Register(kVisitJSArrayBuffer,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(
        kVisitNativeContext,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            Context::kSize>);

    table_.Register(
        kVisitConsString,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            ConsString::kSize>);

    table_.Register(
        kVisitSlicedString,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            SlicedString::kSize>);

    table_.Register(
        kVisitSymbol,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            Symbol::kSize>);

    table_.Register(
        kVisitSharedFunctionInfo,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSWeakCollection,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSFunction, &EvacuateJSFunction);

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject, kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject, kVisitJSObjectGeneric>();

    table_
        .RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                 kVisitJSApiObject, kVisitJSApiObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct, kVisitStructGeneric>();
  }

  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
    return &table_;
  }

 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };

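  // Feeds object copies and promotions into the new-space statistics used by
  // --log-gc and (in debug builds) --heap-stats.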
  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
    should_record = should_record || FLAG_log_gc;
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }

  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the source
  // object. Returns the target object.
  INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
                                   HeapObject* target, int size)) {
    // If we migrate into to-space, then the to-space top pointer should be
    // right after the target object. Incorporate double alignment
    // over-allocation.
    DCHECK(!heap->InToSpace(target) ||
           target->address() + size == heap->new_space()->top() ||
           target->address() + size + kPointerSize == heap->new_space()->top());

    // Make sure that we do not overwrite the promotion queue which is at
    // the end of to-space.
    DCHECK(!heap->InToSpace(target) ||
           heap->promotion_queue()->IsBelowPromotionQueue(
               heap->new_space()->top()));

    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
      // Update NewSpace stats if necessary.
      RecordCopiedObject(heap, target);
      heap->OnMoveEvent(target, source, size);
    }

    if (marks_handling == TRANSFER_MARKS) {
      if (Marking::TransferColor(source, target)) {
        MemoryChunk::IncrementLiveBytesFromGC(target, size);
      }
    }
  }

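  // Tries to copy |object| within new space. Returns false if the raw
  // allocation in to-space fails, e.g. due to fragmentation, in which case the
  // caller falls back to promotion.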
  template <AllocationAlignment alignment>
  static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
                                         HeapObject* object, int object_size) {
    Heap* heap = map->GetHeap();

    DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
    AllocationResult allocation =
        heap->new_space()->AllocateRaw(object_size, alignment);

    HeapObject* target = NULL;  // Initialization to please compiler.
    if (allocation.To(&target)) {
      // Order is important here: Set the promotion limit before storing a
      // filler for double alignment or migrating the object. Otherwise we
      // may end up overwriting promotion queue entries when we migrate the
      // object.
      heap->promotion_queue()->SetNewLimit(heap->new_space()->top());

      MigrateObject(heap, object, target, object_size);

      // Update slot to new target.
      *slot = target;

      heap->IncrementSemiSpaceCopiedObjectSize(object_size);
      return true;
    }
    return false;
  }


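  // Tries to move |object| into old space. Objects that contain pointers are
  // added to the promotion queue so that their slots are visited later.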
  template <ObjectContents object_contents, AllocationAlignment alignment>
  static inline bool PromoteObject(Map* map, HeapObject** slot,
                                   HeapObject* object, int object_size) {
    Heap* heap = map->GetHeap();

    AllocationResult allocation =
        heap->old_space()->AllocateRaw(object_size, alignment);

    HeapObject* target = NULL;  // Initialization to please compiler.
    if (allocation.To(&target)) {
      MigrateObject(heap, object, target, object_size);

      // Update slot to new target.
      *slot = target;

      if (object_contents == POINTER_OBJECT) {
        heap->promotion_queue()->insert(
            target, object_size,
            Marking::IsBlack(Marking::MarkBitFrom(object)));
      }
      heap->IncrementPromotedObjectsSize(object_size);
      return true;
    }
    return false;
  }

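  // Common evacuation path: objects that should not yet be promoted are first
  // copied within new space; if that copy fails, or if the object should be
  // promoted, it is moved to old space. Failing both strategies is a fatal
  // out-of-memory condition.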
  template <ObjectContents object_contents, AllocationAlignment alignment>
  static inline void EvacuateObject(Map* map, HeapObject** slot,
                                    HeapObject* object, int object_size) {
    SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
    SLOW_DCHECK(object->Size() == object_size);
    Heap* heap = map->GetHeap();

    if (!heap->ShouldBePromoted<promotion_mode>(object->address(),
                                                object_size)) {
      // A semi-space copy may fail due to fragmentation. In that case, we
      // try to promote the object.
      if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
        return;
      }
    }

    if (PromoteObject<object_contents, alignment>(map, slot, object,
                                                  object_size)) {
      return;
    }
    if (promotion_mode == PROMOTE_MARKED) {
      FatalProcessOutOfMemory("Scavenger: promoting marked\n");
    }
    // If promotion failed, we try to copy the object to the other semi-space
    if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;

    FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
  }

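  // JSFunction is evacuated like any other pointer object, but when marks are
  // transferred the code entry slot of a black target additionally has to be
  // recorded for the mark-compact collector.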
  static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
                                        HeapObject* object) {
    ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);

    if (marks_handling == IGNORE_MARKS) return;

    MapWord map_word = object->map_word();
    DCHECK(map_word.IsForwardingAddress());
    HeapObject* target = map_word.ToForwardingAddress();

    MarkBit mark_bit = Marking::MarkBitFrom(target);
    if (Marking::IsBlack(mark_bit)) {
      // This object is black and it might not be rescanned by marker.
      // We should explicitly record code entry slot for compaction because
      // promotion queue processing (IteratePromotedObjectPointers) will
      // miss it as it is not HeapObject-tagged.
      Address code_entry_slot =
          target->address() + JSFunction::kCodeEntryOffset;
      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
      map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
          target, code_entry_slot, code);
    }
  }

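  // The Evacuate<Type> helpers below compute the object size for a specific
  // instance type and forward to EvacuateObject with the matching contents
  // classification and allocation alignment.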
  static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
                                        HeapObject* object) {
    int length = reinterpret_cast<FixedArray*>(object)->synchronized_length();
    int object_size = FixedArray::SizeFor(length);
    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
                                                 object_size);
  }

  static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
                                              HeapObject* object) {
    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
    int object_size = FixedDoubleArray::SizeFor(length);
    EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
  }

  static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
                                             HeapObject* object) {
    int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
                                                 object_size);
  }

  static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
                                               HeapObject* object) {
    int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
    EvacuateObject<POINTER_OBJECT, kDoubleAligned>(map, slot, object,
                                                   object_size);
  }

  static inline void EvacuateByteArray(Map* map, HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
  }

  static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqOneByteString::cast(object)
                          ->SeqOneByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
  }

  static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)
                          ->SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
  }

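  // When marks are ignored, a cons string whose second part is the empty
  // string is short-circuited to its first part instead of being copied.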
  static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
                                               HeapObject* object) {
    DCHECK(IsShortcutCandidate(map->instance_type()));

    Heap* heap = map->GetHeap();

    if (marks_handling == IGNORE_MARKS &&
        ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!heap->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();

        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      Scavenger::ScavengeObjectSlow(slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }

    int object_size = ConsString::kSize;
    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
                                                 object_size);
  }

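  // Generic evacuation for objects without a dedicated handler: the object
  // size is either a compile-time constant (VisitSpecialized) or read from
  // the map's instance size (Visit).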
  template <ObjectContents object_contents>
  class ObjectEvacuationStrategy {
   public:
    template <int object_size>
    static inline void VisitSpecialized(Map* map, HeapObject** slot,
                                        HeapObject* object) {
      EvacuateObject<object_contents, kWordAligned>(map, slot, object,
                                                    object_size);
    }

    static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
      int object_size = map->instance_size();
      EvacuateObject<object_contents, kWordAligned>(map, slot, object,
                                                    object_size);
    }
  };

  static VisitorDispatchTable<ScavengingCallback> table_;
};

template <MarksHandling marks_handling, PromotionMode promotion_mode,
          LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback> ScavengingVisitor<
    marks_handling, promotion_mode, logging_and_profiling_mode>::table_;

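// Eagerly populates the four statically allocated dispatch tables that
// SelectScavengingVisitorsTable() chooses between at runtime.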
// static
void Scavenger::Initialize() {
  ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
}


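// Slow path of ScavengeObject: |object| has not been forwarded yet, so
// evacuate it through the visitor selected for its map.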
// static
void Scavenger::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
  MapWord first_word = object->map_word();
  SLOW_DCHECK(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  Scavenger* scavenger = map->GetHeap()->scavenge_collector_;
  scavenger->scavenging_visitors_table_.GetVisitor(map)(map, p, object);
}


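// Picks the dispatch table for the current scavenge: marks are transferred
// (and marked objects promoted) only while incremental marking is active, and
// the logging/profiling variants are used when object moves must be reported.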
void Scavenger::SelectScavengingVisitorsTable() {
  bool logging_and_profiling =
      FLAG_verify_predictable || isolate()->logger()->is_logging() ||
      isolate()->is_profiling() ||
      (isolate()->heap_profiler() != NULL &&
       isolate()->heap_profiler()->is_tracking_object_moves());

  if (!heap()->incremental_marking()->IsMarking()) {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }
  } else {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }

    if (heap()->incremental_marking()->IsCompacting()) {
      // When compacting, forbid short-circuiting of cons strings.
      // Scavenging code relies on the fact that a new space object
      // can't be evacuated into an evacuation candidate, but
      // short-circuiting violates this assumption.
      scavenging_visitors_table_.Register(
          StaticVisitorBase::kVisitShortcutCandidate,
          scavenging_visitors_table_.GetVisitorById(
              StaticVisitorBase::kVisitConsString));
    }
  }
}


Isolate* Scavenger::isolate() { return heap()->isolate(); }


void ScavengeVisitor::VisitPointer(Object** p) { ScavengePointer(p); }


void ScavengeVisitor::VisitPointers(Object** start, Object** end) {
  // Copy all HeapObject pointers in [start, end)
  for (Object** p = start; p < end; p++) ScavengePointer(p);
}


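// Scavenges a single slot: only pointers into new space are considered, and
// slots that appear to point into left-trimmed objects are purged
// (PurgeLeftTrimmedObject) rather than scavenged.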
void ScavengeVisitor::ScavengePointer(Object** p) {
  Object* object = *p;
  if (!heap_->InNewSpace(object)) return;

  if (heap_->PurgeLeftTrimmedObject(p)) return;

  Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                            reinterpret_cast<HeapObject*>(object));
}

}  // namespace internal
}  // namespace v8