// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/heap.h"

#include "src/accessors.h"
#include "src/api.h"
#include "src/ast/scopeinfo.h"
#include "src/base/bits.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/remembered-set.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/interpreter/interpreter.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/type-feedback-vector.h"
#include "src/utils.h"
#include "src/v8.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"

namespace v8 {
namespace internal {


struct Heap::StrongRootsList {
  Object** start;
  Object** end;
  StrongRootsList* next;
};

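// Allocation observer that forwards every observed new-space allocation step
// to Heap::ScheduleIdleScavengeIfNeeded so an idle-time scavenge task can be
// scheduled when appropriate.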
class IdleScavengeObserver : public AllocationObserver {
 public:
  IdleScavengeObserver(Heap& heap, intptr_t step_size)
      : AllocationObserver(step_size), heap_(heap) {}

  void Step(int bytes_allocated, Address, size_t) override {
    heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
  }

 private:
  Heap& heap_;
};

Heap::Heap()
    : external_memory_(0),
      external_memory_limit_(kExternalAllocationLimit),
      external_memory_at_last_mark_compact_(0),
      isolate_(nullptr),
      code_range_size_(0),
      // semispace_size_ should be a power of 2 and old_generation_size_ should
      // be a multiple of Page::kPageSize.
      max_semi_space_size_(8 * (kPointerSize / 4) * MB),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
      initial_old_generation_size_(max_old_generation_size_ /
                                   kInitalOldGenerationLimitFactor),
      old_generation_size_configured_(false),
      max_executable_size_(256ul * (kPointerSize / 4) * MB),
      // Variables set based on semispace_size_ and old_generation_size_ in
      // ConfigureHeap.
      // Will be 4 * reserved_semispace_size_ to ensure that young
      // generation can be aligned to its size.
      maximum_committed_(0),
      survived_since_last_expansion_(0),
      survived_last_scavenge_(0),
      always_allocate_scope_count_(0),
      memory_pressure_level_(MemoryPressureLevel::kNone),
      contexts_disposed_(0),
      number_of_disposed_maps_(0),
      global_ic_age_(0),
      new_space_(this),
      old_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      allocations_count_(0),
      raw_allocations_hash_(0),
      ms_count_(0),
      gc_count_(0),
      remembered_unmapped_pages_index_(0),
#ifdef DEBUG
      allocation_timeout_(0),
#endif  // DEBUG
      old_generation_allocation_limit_(initial_old_generation_size_),
      old_gen_exhausted_(false),
      optimize_for_memory_usage_(false),
      inline_allocation_disabled_(false),
      total_regexp_code_generated_(0),
      tracer_(nullptr),
      high_survival_rate_period_length_(0),
      promoted_objects_size_(0),
      promotion_ratio_(0),
      semi_space_copied_object_size_(0),
      previous_semi_space_copied_object_size_(0),
      semi_space_copied_rate_(0),
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
      maximum_size_scavenges_(0),
      max_gc_pause_(0.0),
      total_gc_time_ms_(0.0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      marking_time_(0.0),
      sweeping_time_(0.0),
      last_idle_notification_time_(0.0),
      last_gc_time_(0.0),
      scavenge_collector_(nullptr),
      mark_compact_collector_(nullptr),
      memory_allocator_(nullptr),
      store_buffer_(this),
      incremental_marking_(nullptr),
      gc_idle_time_handler_(nullptr),
      memory_reducer_(nullptr),
      object_stats_(nullptr),
      scavenge_job_(nullptr),
      idle_scavenge_observer_(nullptr),
      full_codegen_bytes_generated_(0),
      crankshaft_codegen_bytes_generated_(0),
      new_space_allocation_counter_(0),
      old_generation_allocation_counter_(0),
      old_generation_size_at_last_gc_(0),
      gcs_since_last_deopt_(0),
      global_pretenuring_feedback_(nullptr),
      ring_buffer_full_(false),
      ring_buffer_end_(0),
      promotion_queue_(this),
      configured_(false),
      current_gc_flags_(Heap::kNoGCFlags),
      current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
      external_string_table_(this),
      gc_callbacks_depth_(0),
      deserialization_complete_(false),
      strong_roots_list_(NULL),
      heap_iterator_depth_(0),
      force_oom_(false) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  // Ensure old_generation_size_ is a multiple of kPageSize.
  DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  set_native_contexts_list(NULL);
  set_allocation_sites_list(Smi::FromInt(0));
  set_encountered_weak_collections(Smi::FromInt(0));
  set_encountered_weak_cells(Smi::FromInt(0));
  set_encountered_transition_arrays(Smi::FromInt(0));
  // Put a dummy entry in the remembered pages so we can find the list in the
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(NULL, false);
}


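// Returns the memory currently available for objects: the new-space capacity
// plus the capacity of all old-generation spaces.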
intptr_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Capacity() + OldGenerationCapacity();
}

intptr_t Heap::OldGenerationCapacity() {
  if (!HasBeenSetUp()) return 0;

  return old_space_->Capacity() + code_space_->Capacity() +
         map_space_->Capacity() + lo_space_->SizeOfObjects();
}


intptr_t Heap::CommittedOldGenerationMemory() {
  if (!HasBeenSetUp()) return 0;

  return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
         map_space_->CommittedMemory() + lo_space_->Size();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedMemory() + CommittedOldGenerationMemory();
}


size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedPhysicalMemory() +
         old_space_->CommittedPhysicalMemory() +
         code_space_->CommittedPhysicalMemory() +
         map_space_->CommittedPhysicalMemory() +
         lo_space_->CommittedPhysicalMemory();
}


intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return memory_allocator()->SizeExecutable();
}


void Heap::UpdateMaximumCommitted() {
  if (!HasBeenSetUp()) return;

  intptr_t current_committed_memory = CommittedMemory();
  if (current_committed_memory > maximum_committed_) {
    maximum_committed_ = current_committed_memory;
  }
}


intptr_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  intptr_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->Available();
  }
  return total;
}


bool Heap::HasBeenSetUp() {
  return old_space_ != NULL && code_space_ != NULL && map_space_ != NULL &&
         lo_space_ != NULL;
}


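// Decides which collector to run for an allocation failure in the given space
// and reports a short human-readable reason through *reason. Old-space
// requests, flag overrides, reached promotion limits, and exhausted old
// generations all force a full mark-compact; otherwise a scavenge is chosen.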
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationAllocationLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    *reason = "promotion limit reached";
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()
        ->gc_compactor_caused_by_oldspace_exhaustion()
        ->Increment();
    *reason = "old generations exhausted";
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()
        ->gc_compactor_caused_by_oldspace_exhaustion()
        ->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
// Heap::ReportHeapStatistics will also log NewSpace statistics when
// --log-gc is set. The following logic is used to avoid double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}


void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintIsolate(isolate_, "Memory allocator, used: %6" V8PRIdPTR
                         " KB, available: %6" V8PRIdPTR " KB\n",
               memory_allocator()->Size() / KB,
               memory_allocator()->Available() / KB);
  PrintIsolate(isolate_, "New space, used: %6" V8PRIdPTR
                         " KB"
                         ", available: %6" V8PRIdPTR
                         " KB"
                         ", committed: %6" V8PRIdPTR " KB\n",
               new_space_.Size() / KB, new_space_.Available() / KB,
               new_space_.CommittedMemory() / KB);
  PrintIsolate(isolate_, "Old space, used: %6" V8PRIdPTR
                         " KB"
                         ", available: %6" V8PRIdPTR
                         " KB"
                         ", committed: %6" V8PRIdPTR " KB\n",
               old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
               old_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Code space, used: %6" V8PRIdPTR
                         " KB"
                         ", available: %6" V8PRIdPTR
                         " KB"
                         ", committed: %6" V8PRIdPTR " KB\n",
               code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
               code_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Map space, used: %6" V8PRIdPTR
                         " KB"
                         ", available: %6" V8PRIdPTR
                         " KB"
                         ", committed: %6" V8PRIdPTR " KB\n",
               map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
               map_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Large object space, used: %6" V8PRIdPTR
                         " KB"
                         ", available: %6" V8PRIdPTR
                         " KB"
                         ", committed: %6" V8PRIdPTR " KB\n",
               lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
               lo_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "All spaces, used: %6" V8PRIdPTR
                         " KB"
                         ", available: %6" V8PRIdPTR
                         " KB"
                         ", committed: %6" V8PRIdPTR " KB\n",
               this->SizeOfObjects() / KB, this->Available() / KB,
               this->CommittedMemory() / KB);
  PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
               static_cast<intptr_t>(external_memory_ / KB));
  PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
               total_gc_time_ms_);
}

// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
// Similar to the code before GC, we use some complicated logic to ensure that
// NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
       ++i) {
    int count = deferred_counters_[i];
    deferred_counters_[i] = 0;
    while (count > 0) {
      count--;
      isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
    }
  }
}


void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
  deferred_counters_[feature]++;
}


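// Bookkeeping performed before any collection work starts: bumps the GC
// counter, optionally verifies the heap, resets per-cycle statistics, and
// flushes buffered store-buffer entries into the remembered set.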
void Heap::GarbageCollectionPrologue() {
  {
    AllowHeapAllocation for_the_first_part_of_prologue;
    gc_count_++;

#ifdef VERIFY_HEAP
    if (FLAG_verify_heap) {
      Verify();
    }
#endif
  }

  // Reset GC statistics.
  promoted_objects_size_ = 0;
  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
  semi_space_copied_object_size_ = 0;
  nodes_died_in_new_space_ = 0;
  nodes_copied_in_new_space_ = 0;
  nodes_promoted_ = 0;

  UpdateMaximumCommitted();

#ifdef DEBUG
  DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
#endif  // DEBUG

  if (new_space_.IsAtMaximumCapacity()) {
    maximum_size_scavenges_++;
  } else {
    maximum_size_scavenges_ = 0;
  }
  CheckNewSpaceExpansionCriteria();
  UpdateNewSpaceAllocationCounter();
  store_buffer()->MoveEntriesToRememberedSet();
}


intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


const char* Heap::GetSpaceName(int idx) {
  switch (idx) {
    case NEW_SPACE:
      return "new_space";
    case OLD_SPACE:
      return "old_space";
    case MAP_SPACE:
      return "map_space";
    case CODE_SPACE:
      return "code_space";
    case LO_SPACE:
      return "large_object_space";
    default:
      UNREACHABLE();
  }
  return nullptr;
}


void Heap::RepairFreeListsAfterDeserialization() {
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next(); space != NULL;
       space = spaces.next()) {
    space->RepairFreeListsAfterDeserialization();
  }
}

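// Merges pretenuring feedback collected locally during a scavenge or
// evacuation into the global feedback hash map, following forwarding pointers
// in case an allocation site was itself moved by the GC.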
void Heap::MergeAllocationSitePretenuringFeedback(
    const base::HashMap& local_pretenuring_feedback) {
  AllocationSite* site = nullptr;
  for (base::HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
       local_entry != nullptr;
       local_entry = local_pretenuring_feedback.Next(local_entry)) {
    site = reinterpret_cast<AllocationSite*>(local_entry->key);
    MapWord map_word = site->map_word();
    if (map_word.IsForwardingAddress()) {
      site = AllocationSite::cast(map_word.ToForwardingAddress());
    }

    // We have not validated the allocation site yet, since we did not
    // dereference the site while collecting the information.
    // This is an inlined check of AllocationMemento::IsValid.
    if (!site->IsAllocationSite() || site->IsZombie()) continue;

    int value =
        static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
    DCHECK_GT(value, 0);

    if (site->IncrementMementoFoundCount(value)) {
      global_pretenuring_feedback_->LookupOrInsert(site,
                                                   ObjectHash(site->address()));
    }
  }
}


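// RAII helper that owns the global pretenuring feedback hash map for the
// duration of a single garbage collection and deletes it afterwards.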
class Heap::PretenuringScope {
 public:
  explicit PretenuringScope(Heap* heap) : heap_(heap) {
    heap_->global_pretenuring_feedback_ = new base::HashMap(
        base::HashMap::PointersMatch, kInitialFeedbackCapacity);
  }

  ~PretenuringScope() {
    delete heap_->global_pretenuring_feedback_;
    heap_->global_pretenuring_feedback_ = nullptr;
  }

 private:
  Heap* heap_;
};


void Heap::ProcessPretenuringFeedback() {
  bool trigger_deoptimization = false;
  if (FLAG_allocation_site_pretenuring) {
    int tenure_decisions = 0;
    int dont_tenure_decisions = 0;
    int allocation_mementos_found = 0;
    int allocation_sites = 0;
    int active_allocation_sites = 0;

    AllocationSite* site = nullptr;

    // Step 1: Digest feedback for recorded allocation sites.
    bool maximum_size_scavenge = MaximumSizeScavenge();
    for (base::HashMap::Entry* e = global_pretenuring_feedback_->Start();
         e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
      allocation_sites++;
      site = reinterpret_cast<AllocationSite*>(e->key);
      int found_count = site->memento_found_count();
      // An entry in the storage does not imply that the count is > 0 because
      // allocation sites might have been reset due to too many objects dying
      // in old space.
      if (found_count > 0) {
        DCHECK(site->IsAllocationSite());
        active_allocation_sites++;
        allocation_mementos_found += found_count;
        if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
          trigger_deoptimization = true;
        }
        if (site->GetPretenureMode() == TENURED) {
          tenure_decisions++;
        } else {
          dont_tenure_decisions++;
        }
      }
    }

    // Step 2: Deopt maybe tenured allocation sites if necessary.
    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
    if (deopt_maybe_tenured) {
      Object* list_element = allocation_sites_list();
      while (list_element->IsAllocationSite()) {
        site = AllocationSite::cast(list_element);
        DCHECK(site->IsAllocationSite());
        allocation_sites++;
        if (site->IsMaybeTenure()) {
          site->set_deopt_dependent_code(true);
          trigger_deoptimization = true;
        }
        list_element = site->weak_next();
      }
    }

    if (trigger_deoptimization) {
      isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
    }

    if (FLAG_trace_pretenuring_statistics &&
        (allocation_mementos_found > 0 || tenure_decisions > 0 ||
         dont_tenure_decisions > 0)) {
      PrintIsolate(isolate(),
                   "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
                   "active_sites=%d "
                   "mementos=%d tenured=%d not_tenured=%d\n",
                   deopt_maybe_tenured ? 1 : 0, allocation_sites,
                   active_allocation_sites, allocation_mementos_found,
                   tenure_decisions, dont_tenure_decisions);
    }
  }
}


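// Walks the global allocation-site list, marks code that depends on sites
// whose deopt flag was set, and then deoptimizes all marked code.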
void Heap::DeoptMarkedAllocationSites() {
  // TODO(hpayer): If iterating over the allocation sites list becomes a
  // performance issue, use a cache data structure in heap instead.
  Object* list_element = allocation_sites_list();
  while (list_element->IsAllocationSite()) {
    AllocationSite* site = AllocationSite::cast(list_element);
    if (site->deopt_dependent_code()) {
      site->dependent_code()->MarkCodeForDeoptimization(
          isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
      site->set_deopt_dependent_code(false);
    }
    list_element = site->weak_next();
  }
  Deoptimizer::DeoptimizeMarkedCode(isolate_);
}


void Heap::GarbageCollectionEpilogue() {
  // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  AllowHeapAllocation for_the_rest_of_the_epilogue;

#ifdef DEBUG
  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
  if (FLAG_check_handle_count) CheckHandleCount();
#endif
  if (FLAG_deopt_every_n_garbage_collections > 0) {
    // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
    // the topmost optimized frame can be deoptimized safely, because it
    // might not have a lazy bailout point right after its current PC.
    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
      Deoptimizer::DeoptimizeAll(isolate());
      gcs_since_last_deopt_ = 0;
    }
  }

  UpdateMaximumCommitted();

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->string_table_capacity()->Set(
      string_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      string_table()->NumberOfElements());

  if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
    isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
        static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
                         (crankshaft_codegen_bytes_generated_ +
                          full_codegen_bytes_generated_)));
  }

  if (CommittedMemory() > 0) {
    isolate_->counters()->external_fragmentation_total()->AddSample(
        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
        (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_old_space()->AddSample(static_cast<int>(
        (old_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_code_space()->AddSample(
        static_cast<int>((code_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
    isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
        (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
        (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_sample_total_committed()->AddSample(
        static_cast<int>(CommittedMemory() / KB));
    isolate_->counters()->heap_sample_total_used()->AddSample(
        static_cast<int>(SizeOfObjects() / KB));
    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
        static_cast<int>(map_space()->CommittedMemory() / KB));
    isolate_->counters()->heap_sample_code_space_committed()->AddSample(
        static_cast<int>(code_space()->CommittedMemory() / KB));

    isolate_->counters()->heap_sample_maximum_committed()->AddSample(
        static_cast<int>(MaximumCommittedMemory() / KB));
  }

#define UPDATE_COUNTERS_FOR_SPACE(space)                \
  isolate_->counters()->space##_bytes_available()->Set( \
      static_cast<int>(space()->Available()));          \
  isolate_->counters()->space##_bytes_committed()->Set( \
      static_cast<int>(space()->CommittedMemory()));    \
  isolate_->counters()->space##_bytes_used()->Set(      \
      static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
  if (space()->CommittedMemory() > 0) {                                \
    isolate_->counters()->external_fragmentation_##space()->AddSample( \
        static_cast<int>(100 -                                         \
                         (space()->SizeOfObjects() * 100.0) /          \
                             space()->CommittedMemory()));             \
  }
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
  UPDATE_COUNTERS_FOR_SPACE(space)                         \
  UPDATE_FRAGMENTATION_FOR_SPACE(space)

  UPDATE_COUNTERS_FOR_SPACE(new_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE

#ifdef DEBUG
  ReportStatisticsAfterGC();
#endif  // DEBUG

  // Remember the last top pointer so that we can later find out
  // whether we allocated in new space since the last GC.
  new_space_top_after_last_gc_ = new_space()->top();
  last_gc_time_ = MonotonicallyIncreasingTimeInMs();

  ReduceNewSpaceSize();
}


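// Walks the weak list of captured stack traces and replaces each code
// reference with its resolved source position; the guard below skips traces
// that have already been preprocessed.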
void Heap::PreprocessStackTraces() {
  WeakFixedArray::Iterator iterator(weak_stack_trace_list());
  FixedArray* elements;
  while ((elements = iterator.Next<FixedArray>())) {
    for (int j = 1; j < elements->length(); j += 4) {
      Object* maybe_code = elements->get(j + 2);
      // If GC happens while adding a stack trace to the weak fixed array,
      // which has been copied into a larger backing store, we may run into
      // a stack trace that has already been preprocessed. Guard against this.
      if (!maybe_code->IsAbstractCode()) break;
      AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
      int offset = Smi::cast(elements->get(j + 3))->value();
      int pos = abstract_code->SourcePosition(offset);
      elements->set(j + 2, Smi::FromInt(pos));
    }
  }
  // We must not compact the weak fixed list here, as we may be in the middle
  // of writing to it when the GC was triggered. Instead, we reset the root
  // value.
  set_weak_stack_trace_list(Smi::FromInt(0));
}


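// Tracks the nesting depth of GC callback invocations; CheckReenter() is true
// only for the outermost scope, so prologue/epilogue callbacks are not run
// reentrantly.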
class GCCallbacksScope {
 public:
  explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
    heap_->gc_callbacks_depth_++;
  }
  ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }

  bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }

 private:
  Heap* heap_;
};


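// Services a GC request raised via the stack guard: memory pressure is handled
// first, then completing incremental marking, then finalizing marking.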
void Heap::HandleGCRequest() {
  if (HighMemoryPressure()) {
    incremental_marking()->reset_request_type();
    CheckMemoryPressure();
  } else if (incremental_marking()->request_type() ==
             IncrementalMarking::COMPLETE_MARKING) {
    incremental_marking()->reset_request_type();
    CollectAllGarbage(current_gc_flags_, "GC interrupt",
                      current_gc_callback_flags_);
  } else if (incremental_marking()->request_type() ==
                 IncrementalMarking::FINALIZATION &&
             incremental_marking()->IsMarking() &&
             !incremental_marking()->finalize_marking_completed()) {
    incremental_marking()->reset_request_type();
    FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
  }
}


void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
  scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
}


void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] (%s).\n", gc_reason);
  }

  HistogramTimerScope incremental_marking_scope(
      isolate()->counters()->gc_incremental_marking_finalize());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
  TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);

  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    }
  }
  incremental_marking()->FinalizeIncrementally();
  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    }
  }
}


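// Picks the histogram timer that matches the collector about to run,
// distinguishing scavenges, full GCs that finalize incremental marking, and
// standalone mark-compact GCs.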
HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
  if (collector == SCAVENGER) {
    return isolate_->counters()->gc_scavenger();
  } else {
    if (!incremental_marking()->IsStopped()) {
      if (ShouldReduceMemory()) {
        return isolate_->counters()->gc_finalize_reduce_memory();
      } else {
        return isolate_->counters()->gc_finalize();
      }
    } else {
      return isolate_->counters()->gc_compactor();
    }
  }
}

void Heap::CollectAllGarbage(int flags, const char* gc_reason,
                             const v8::GCCallbackFlags gc_callback_flags) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  set_current_gc_flags(flags);
  CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
  set_current_gc_flags(kNoGCFlags);
}


void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // A major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until the next
  // major GC. Therefore, if we collect aggressively and a weak handle callback
  // has been invoked, we rerun the major GC to release objects which became
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore stop recollecting after several attempts.
  if (isolate()->concurrent_recompilation_enabled()) {
    // The optimizing compiler may be unnecessarily holding on to memory.
    DisallowHeapAllocation no_recursive_gc;
    isolate()->optimizing_compile_dispatcher()->Flush();
  }
  isolate()->ClearSerializerData();
  set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
  isolate_->compilation_cache()->Clear();
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
                        v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
        attempt + 1 >= kMinNumberOfAttempts) {
      break;
    }
  }
  set_current_gc_flags(kNoGCFlags);
  new_space_.Shrink();
  UncommitFromSpace();
}


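// Reacts to embedder-reported external allocations crossing the limit: starts
// incremental marking if possible, performs a full GC when marking cannot be
// started, or advances marking that is already in progress.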
void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
  if (incremental_marking()->IsStopped()) {
    if (incremental_marking()->CanBeActivated()) {
      StartIncrementalMarking(
          i::Heap::kNoGCFlags,
          kGCCallbackFlagSynchronousPhantomCallbackProcessing, gc_reason);
    } else {
      CollectAllGarbage(i::Heap::kNoGCFlags, gc_reason,
                        kGCCallbackFlagSynchronousPhantomCallbackProcessing);
    }
  } else {
    // Incremental marking is turned on and has already been started.

    // TODO(mlippautz): Compute the time slice for incremental marking based on
    // memory pressure.
    double deadline = MonotonicallyIncreasingTimeInMs() +
                      FLAG_external_allocation_limit_incremental_time;
    incremental_marking()->AdvanceIncrementalMarking(
        deadline,
        IncrementalMarking::StepActions(IncrementalMarking::GC_VIA_STACK_GUARD,
                                        IncrementalMarking::FORCE_MARKING,
                                        IncrementalMarking::FORCE_COMPLETION));
  }
}


void Heap::EnsureFillerObjectAtTop() {
  // There may be an allocation memento behind objects in new space. Upon
  // evacuation of a non-full new space (or if we are on the last page) there
  // may be uninitialized memory behind top. We fill the remainder of the page
  // with a filler.
  Address to_top = new_space_.top();
  Page* page = Page::FromAddress(to_top - kPointerSize);
  if (page->Contains(to_top)) {
    int remaining_in_page = static_cast<int>(page->area_end() - to_top);
    CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
  }
}


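// Runs one garbage collection of the requested type, wrapping it in tracer,
// timer, and callback bookkeeping, and returns whether the next GC is likely
// to collect more memory (used by callers that loop until no progress).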
bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
                          const char* collector_reason,
                          const v8::GCCallbackFlags gc_callback_flags) {
  // The VM is in the GC state until exiting this function.
  VMState<GC> state(isolate_);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  EnsureFillerObjectAtTop();

  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Scavenge during marking.\n");
    }
  }

  if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
      !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
      !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
      OldGenerationAllocationLimitReached()) {
    if (!incremental_marking()->IsComplete() &&
        !mark_compact_collector()->marking_deque_.IsEmpty() &&
        !FLAG_gc_global) {
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
      }
      collector = SCAVENGER;
      collector_reason = "incremental marking delaying mark-sweep";
    }
  }

  bool next_gc_likely_to_collect_more = false;
  intptr_t committed_memory_before = 0;

  if (collector == MARK_COMPACTOR) {
    committed_memory_before = CommittedOldGenerationMemory();
  }

  {
    tracer()->Start(collector, gc_reason, collector_reason);
    DCHECK(AllowHeapAllocation::IsAllowed());
    DisallowHeapAllocation no_allocation_during_gc;
    GarbageCollectionPrologue();

    {
      HistogramTimer* gc_type_timer = GCTypeTimer(collector);
      HistogramTimerScope histogram_timer_scope(gc_type_timer);
      TRACE_EVENT0("v8", gc_type_timer->name());

      next_gc_likely_to_collect_more =
          PerformGarbageCollection(collector, gc_callback_flags);
    }

    GarbageCollectionEpilogue();
    if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
      isolate()->CheckDetachedContextsAfterGC();
    }

    if (collector == MARK_COMPACTOR) {
      intptr_t committed_memory_after = CommittedOldGenerationMemory();
      intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
      MemoryReducer::Event event;
      event.type = MemoryReducer::kMarkCompact;
      event.time_ms = MonotonicallyIncreasingTimeInMs();
      // Trigger one more GC if
      // - this GC decreased committed memory,
      // - there is high fragmentation,
      // - there are live detached contexts.
      event.next_gc_likely_to_collect_more =
          (committed_memory_before - committed_memory_after) > MB ||
          HasHighFragmentation(used_memory_after, committed_memory_after) ||
          (detached_contexts()->length() > 0);
      if (deserialization_complete_) {
        memory_reducer_->NotifyMarkCompact(event);
      }
      memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
    }

    tracer()->Stop(collector);
  }

  if (collector == MARK_COMPACTOR &&
      (gc_callback_flags & (kGCCallbackFlagForced |
                            kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
    isolate()->CountUsage(v8::Isolate::kForcedGC);
  }

  // Start incremental marking for the next cycle. The heap snapshot
  // generator needs incremental marking to stay off after it aborted.
  if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
      incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
    StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
  }

  return next_gc_likely_to_collect_more;
}


int Heap::NotifyContextDisposed(bool dependant_context) {
  if (!dependant_context) {
    tracer()->ResetSurvivalEvents();
    old_generation_size_configured_ = false;
    MemoryReducer::Event event;
    event.type = MemoryReducer::kPossibleGarbage;
    event.time_ms = MonotonicallyIncreasingTimeInMs();
    memory_reducer_->NotifyPossibleGarbage(event);
  }
  if (isolate()->concurrent_recompilation_enabled()) {
    // Flush the queued recompilation tasks.
    isolate()->optimizing_compile_dispatcher()->Flush();
  }
  AgeInlineCaches();
  number_of_disposed_maps_ = retained_maps()->Length();
  tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
  return ++contexts_disposed_;
}


void Heap::StartIncrementalMarking(int gc_flags,
                                   const GCCallbackFlags gc_callback_flags,
                                   const char* reason) {
  DCHECK(incremental_marking()->IsStopped());
  set_current_gc_flags(gc_flags);
  current_gc_callback_flags_ = gc_callback_flags;
  incremental_marking()->Start(reason);
}


void Heap::StartIdleIncrementalMarking() {
  gc_idle_time_handler_->ResetNoProgressCounter();
  StartIncrementalMarking(kReduceMemoryFootprintMask, kNoGCCallbackFlags,
                          "idle");
}


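// Moves len consecutive elements within array from src_index to dst_index and
// records the write barrier for the destination range.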
void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
                        int len) {
  if (len == 0) return;

  DCHECK(array->map() != fixed_cow_array_map());
  Object** dst_objects = array->data_start() + dst_index;
  MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
  FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
}


#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) override {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*p);
        Isolate* isolate = object->GetIsolate();
        // Check that the string is actually internalized.
        CHECK(object->IsTheHole(isolate) || object->IsUndefined(isolate) ||
              object->IsInternalizedString());
      }
    }
  }
};


static void VerifyStringTable(Heap* heap) {
  StringTableVerifier verifier;
  heap->string_table()->IterateElements(&verifier);
}
#endif  // VERIFY_HEAP


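// Reserves the memory requested by the snapshot deserializer. If a chunk does
// not fit, a GC is triggered and the whole set of reservations is retried, up
// to a fixed number of attempts.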
bool Heap::ReserveSpace(Reservation* reservations) {
  bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    for (int space = NEW_SPACE; space < SerializerDeserializer::kNumberOfSpaces;
         space++) {
      Reservation* reservation = &reservations[space];
      DCHECK_LE(1, reservation->length());
      if (reservation->at(0).size == 0) continue;
      bool perform_gc = false;
      if (space == LO_SPACE) {
        DCHECK_EQ(1, reservation->length());
        perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
      } else {
        for (auto& chunk : *reservation) {
          AllocationResult allocation;
          int size = chunk.size;
          DCHECK_LE(size, MemoryAllocator::PageAreaSize(
                              static_cast<AllocationSpace>(space)));
          if (space == NEW_SPACE) {
            allocation = new_space()->AllocateRawUnaligned(size);
          } else {
            // The deserializer will update the skip list.
            allocation = paged_space(space)->AllocateRawUnaligned(
                size, PagedSpace::IGNORE_SKIP_LIST);
          }
          HeapObject* free_space = nullptr;
          if (allocation.To(&free_space)) {
            // Mark with a free list node, in case we have a GC before
            // deserializing.
            Address free_space_address = free_space->address();
            CreateFillerObjectAt(free_space_address, size,
                                 ClearRecordedSlots::kNo);
            DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
            chunk.start = free_space_address;
            chunk.end = free_space_address + size;
          } else {
            perform_gc = true;
            break;
          }
        }
      }
      if (perform_gc) {
        if (space == NEW_SPACE) {
          CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
        } else {
          if (counter > 1) {
            CollectAllGarbage(
                kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
                "failed to reserve space in paged or large "
                "object space, trying to reduce memory footprint");
          } else {
            CollectAllGarbage(
                kAbortIncrementalMarkingMask,
                "failed to reserve space in paged or large object space");
          }
        }
        gc_performed = true;
        break;  // Abort for-loop over spaces and retry.
      }
    }
  }

  return !gc_performed;
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }

  Object* context = native_contexts_list();
  while (!context->IsUndefined(isolate())) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined(isolate())) {
      NormalizedMapCache::cast(cache)->Clear();
    }
    context = Context::cast(context)->next_context_link();
  }
}


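// Recomputes promotion and semi-space copy ratios after a collection, based on
// the new-space size recorded at the start of the GC.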
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
  if (start_new_space_size == 0) return;

  promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
                      static_cast<double>(start_new_space_size) * 100);

  if (previous_semi_space_copied_object_size_ > 0) {
    promotion_rate_ =
        (static_cast<double>(promoted_objects_size_) /
         static_cast<double>(previous_semi_space_copied_object_size_) * 100);
  } else {
    promotion_rate_ = 0;
  }

  semi_space_copied_rate_ =
      (static_cast<double>(semi_space_copied_object_size_) /
       static_cast<double>(start_new_space_size) * 100);

  double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
  tracer()->AddSurvivalRatio(survival_rate);
  if (survival_rate > kYoungSurvivalRateHighThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }
}

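// Runs the actual collection work for the chosen collector, surrounded by GC
// prologue/epilogue callbacks and weak global-handle post-processing, and
// updates the old-generation allocation limit afterwards.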
1254bool Heap::PerformGarbageCollection(
1255 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1256 int freed_global_handles = 0;
1257
1258 if (collector != SCAVENGER) {
1259 PROFILE(isolate_, CodeMovingGCEvent());
1260 }
1261
1262#ifdef VERIFY_HEAP
1263 if (FLAG_verify_heap) {
1264 VerifyStringTable(this);
1265 }
1266#endif
1267
1268 GCType gc_type =
1269 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1270
1271 {
1272 GCCallbacksScope scope(this);
1273 if (scope.CheckReenter()) {
1274 AllowHeapAllocation allow_allocation;
Ben Murdochda12d292016-06-02 14:46:10 +01001275 TRACE_GC(tracer(), collector == MARK_COMPACTOR
1276 ? GCTracer::Scope::MC_EXTERNAL_PROLOGUE
1277 : GCTracer::Scope::SCAVENGER_EXTERNAL_PROLOGUE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001278 VMState<EXTERNAL> state(isolate_);
1279 HandleScope handle_scope(isolate_);
1280 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1281 }
1282 }
1283
1284 EnsureFromSpaceIsCommitted();
1285
1286 int start_new_space_size = Heap::new_space()->SizeAsInt();
1287
1288 if (IsHighSurvivalRate()) {
1289 // We speed up the incremental marker if it is running so that it
1290 // does not fall behind the rate of promotion, which would cause a
1291 // constantly growing old space.
1292 incremental_marking()->NotifyOfHighPromotionRate();
1293 }
1294
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001295 {
1296 Heap::PretenuringScope pretenuring_scope(this);
1297
1298 if (collector == MARK_COMPACTOR) {
1299 UpdateOldGenerationAllocationCounter();
1300 // Perform mark-sweep with optional compaction.
1301 MarkCompact();
1302 old_gen_exhausted_ = false;
1303 old_generation_size_configured_ = true;
1304 // This should be updated before PostGarbageCollectionProcessing, which
1305 // can cause another GC. Take into account the objects promoted during GC.
1306 old_generation_allocation_counter_ +=
1307 static_cast<size_t>(promoted_objects_size_);
1308 old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
1309 } else {
1310 Scavenge();
1311 }
1312
1313 ProcessPretenuringFeedback();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001314 }
1315
1316 UpdateSurvivalStatistics(start_new_space_size);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001317 ConfigureInitialOldGenerationSize();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001318
1319 isolate_->counters()->objs_since_last_young()->Set(0);
1320
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001321 gc_post_processing_depth_++;
1322 {
1323 AllowHeapAllocation allow_allocation;
Ben Murdochda12d292016-06-02 14:46:10 +01001324 TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_WEAK_GLOBAL_HANDLES);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001325 freed_global_handles =
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001326 isolate_->global_handles()->PostGarbageCollectionProcessing(
1327 collector, gc_callback_flags);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001328 }
1329 gc_post_processing_depth_--;
1330
1331 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1332
1333 // Update relocatables.
1334 Relocatable::PostGarbageCollectionProcessing(isolate_);
1335
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001336 double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
Ben Murdochda12d292016-06-02 14:46:10 +01001337 double mutator_speed =
1338 tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001339 intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001340 if (collector == MARK_COMPACTOR) {
1341 // Register the amount of externally allocated memory.
Ben Murdoch61f157c2016-09-16 13:49:30 +01001342 external_memory_at_last_mark_compact_ = external_memory_;
1343 external_memory_limit_ = external_memory_ + kExternalAllocationLimit;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001344 SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1345 } else if (HasLowYoungGenerationAllocationRate() &&
1346 old_generation_size_configured_) {
1347 DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001348 }
1349
1350 {
1351 GCCallbacksScope scope(this);
1352 if (scope.CheckReenter()) {
1353 AllowHeapAllocation allow_allocation;
Ben Murdochda12d292016-06-02 14:46:10 +01001354 TRACE_GC(tracer(), collector == MARK_COMPACTOR
1355 ? GCTracer::Scope::MC_EXTERNAL_EPILOGUE
1356 : GCTracer::Scope::SCAVENGER_EXTERNAL_EPILOGUE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001357 VMState<EXTERNAL> state(isolate_);
1358 HandleScope handle_scope(isolate_);
1359 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1360 }
1361 }
1362
1363#ifdef VERIFY_HEAP
1364 if (FLAG_verify_heap) {
1365 VerifyStringTable(this);
1366 }
1367#endif
1368
1369 return freed_global_handles > 0;
1370}
1371
1372
1373void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1374 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1375 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001376 if (!gc_prologue_callbacks_[i].pass_isolate) {
1377 v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
1378 gc_prologue_callbacks_[i].callback);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001379 callback(gc_type, flags);
1380 } else {
1381 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1382 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1383 }
1384 }
1385 }
Ben Murdochda12d292016-06-02 14:46:10 +01001386 if (FLAG_trace_object_groups && (gc_type == kGCTypeIncrementalMarking ||
1387 gc_type == kGCTypeMarkSweepCompact)) {
1388 isolate_->global_handles()->PrintObjectGroups();
1389 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001390}
1391
1392
1393void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1394 GCCallbackFlags gc_callback_flags) {
1395 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1396 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001397 if (!gc_epilogue_callbacks_[i].pass_isolate) {
1398 v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
1399 gc_epilogue_callbacks_[i].callback);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001400 callback(gc_type, gc_callback_flags);
1401 } else {
1402 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1403 gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1404 }
1405 }
1406 }
1407}
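// Illustrative embedder-side example (an assumption about typical usage, not
// code from this file): the two dispatch branches above correspond to the two
// callback shapes that can be registered. A callback that wants the isolate is
// invoked through the pass_isolate branch:
//
//   void OnGCEpilogue(v8::Isolate* isolate, v8::GCType type,
//                     v8::GCCallbackFlags flags) {
//     // e.g. sample external memory usage after each full GC
//   }
//
// Such a callback would typically be registered through the public
// v8::Isolate GC-callback API; the registration itself happens outside this
// file.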
1408
1409
1410void Heap::MarkCompact() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001411 PauseAllocationObserversScope pause_observers(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001412
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001413 gc_state_ = MARK_COMPACT;
1414 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1415
1416 uint64_t size_of_objects_before_gc = SizeOfObjects();
1417
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001418 mark_compact_collector()->Prepare();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001419
1420 ms_count_++;
1421
1422 MarkCompactPrologue();
1423
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001424 mark_compact_collector()->CollectGarbage();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001425
1426 LOG(isolate_, ResourceEvent("markcompact", "end"));
1427
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001428 MarkCompactEpilogue();
1429
1430 if (FLAG_allocation_site_pretenuring) {
1431 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1432 }
1433}
1434
1435
1436void Heap::MarkCompactEpilogue() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001437 gc_state_ = NOT_IN_GC;
1438
1439 isolate_->counters()->objs_since_last_full()->Set(0);
1440
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001441 incremental_marking()->Epilogue();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001442
1443 PreprocessStackTraces();
Ben Murdochda12d292016-06-02 14:46:10 +01001444 DCHECK(incremental_marking()->IsStopped());
1445
1446 // We finished a marking cycle. We can uncommit the marking deque until
1447 // we start marking again.
1448 mark_compact_collector()->marking_deque()->Uninitialize();
1449 mark_compact_collector()->EnsureMarkingDequeIsCommitted(
1450 MarkCompactCollector::kMinMarkingDequeSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001451}
1452
1453
1454void Heap::MarkCompactPrologue() {
1455 // On every full GC, clear the keyed lookup cache so that unused maps can be
1456 // collected.
1457 isolate_->keyed_lookup_cache()->Clear();
1458 isolate_->context_slot_cache()->Clear();
1459 isolate_->descriptor_lookup_cache()->Clear();
1460 RegExpResultsCache::Clear(string_split_cache());
1461 RegExpResultsCache::Clear(regexp_multiple_cache());
1462
1463 isolate_->compilation_cache()->MarkCompactPrologue();
1464
1465 CompletelyClearInstanceofCache();
1466
1467 FlushNumberStringCache();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001468 ClearNormalizedMapCaches();
1469}
1470
1471
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001472#ifdef VERIFY_HEAP
1473// Visitor class to verify pointers in code or data space do not point into
1474// new space.
1475class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
1476 public:
1477 explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001478
1479 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001480 for (Object** current = start; current < end; current++) {
1481 if ((*current)->IsHeapObject()) {
1482 CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1483 }
1484 }
1485 }
1486
1487 private:
1488 Heap* heap_;
1489};
1490
1491
1492static void VerifyNonPointerSpacePointers(Heap* heap) {
1493 // Verify that there are no pointers to new space in spaces where we
1494 // do not expect them.
1495 VerifyNonPointerSpacePointersVisitor v(heap);
1496 HeapObjectIterator code_it(heap->code_space());
1497 for (HeapObject* object = code_it.Next(); object != NULL;
1498 object = code_it.Next())
1499 object->Iterate(&v);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001500}
1501#endif // VERIFY_HEAP
1502
1503
1504void Heap::CheckNewSpaceExpansionCriteria() {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001505 if (FLAG_experimental_new_space_growth_heuristic) {
1506 if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1507 survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
1508 // Grow the size of new space if there is room to grow, and more than 10%
1509 // have survived the last scavenge.
1510 new_space_.Grow();
1511 survived_since_last_expansion_ = 0;
1512 }
1513 } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1514 survived_since_last_expansion_ > new_space_.TotalCapacity()) {
1515 // Grow the size of new space if there is room to grow, and enough data
1516 // has survived scavenge since the last expansion.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001517 new_space_.Grow();
1518 survived_since_last_expansion_ = 0;
1519 }
1520}
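// Worked example (illustrative numbers only): under the default heuristic,
// with new_space_.TotalCapacity() at 2 MB, MaximumCapacity() at 8 MB and
// survived_since_last_expansion_ at 3 MB, the second branch fires (3 MB >
// 2 MB and there is still room to grow), so new space is grown and the
// counter is reset. The experimental heuristic instead grows new space as
// soon as at least 10% of the current capacity survived the last scavenge.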
1521
1522
1523static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1524 return heap->InNewSpace(*p) &&
1525 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1526}
1527
1528
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001529static bool IsUnmodifiedHeapObject(Object** p) {
1530 Object* object = *p;
1531 if (object->IsSmi()) return false;
1532 HeapObject* heap_object = HeapObject::cast(object);
1533 if (!object->IsJSObject()) return false;
Ben Murdochc5610432016-08-08 18:44:38 +01001534 JSObject* js_object = JSObject::cast(object);
1535 if (!js_object->WasConstructedFromApiFunction()) return false;
1536 JSFunction* constructor =
1537 JSFunction::cast(js_object->map()->GetConstructor());
1538
1539 return constructor->initial_map() == heap_object->map();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001540}
1541
1542
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001543void PromotionQueue::Initialize() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001544 // The last to-space page may be used for the promotion queue. On promotion
1545 // conflict, we use the emergency stack.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001546 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
1547 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001548 front_ = rear_ =
Ben Murdochda12d292016-06-02 14:46:10 +01001549 reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
1550 limit_ = reinterpret_cast<struct Entry*>(
Ben Murdochc5610432016-08-08 18:44:38 +01001551 Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
1552 ->area_start());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001553 emergency_stack_ = NULL;
1554}
1555
1556
1557void PromotionQueue::RelocateQueueHead() {
1558 DCHECK(emergency_stack_ == NULL);
1559
Ben Murdochc5610432016-08-08 18:44:38 +01001560 Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
Ben Murdochda12d292016-06-02 14:46:10 +01001561 struct Entry* head_start = rear_;
1562 struct Entry* head_end =
1563 Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001564
1565 int entries_count =
Ben Murdochda12d292016-06-02 14:46:10 +01001566 static_cast<int>(head_end - head_start) / sizeof(struct Entry);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001567
1568 emergency_stack_ = new List<Entry>(2 * entries_count);
1569
1570 while (head_start != head_end) {
Ben Murdochda12d292016-06-02 14:46:10 +01001571 struct Entry* entry = head_start++;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001572 // New space allocation in SemiSpaceCopyObject marked the region
1573 // overlapping with the promotion queue as uninitialized.
Ben Murdochda12d292016-06-02 14:46:10 +01001574 MSAN_MEMORY_IS_INITIALIZED(entry, sizeof(struct Entry));
1575 emergency_stack_->Add(*entry);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001576 }
1577 rear_ = head_end;
1578}
1579
1580
1581class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1582 public:
1583 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1584
1585 virtual Object* RetainAs(Object* object) {
1586 if (!heap_->InFromSpace(object)) {
1587 return object;
1588 }
1589
1590 MapWord map_word = HeapObject::cast(object)->map_word();
1591 if (map_word.IsForwardingAddress()) {
1592 return map_word.ToForwardingAddress();
1593 }
1594 return NULL;
1595 }
1596
1597 private:
1598 Heap* heap_;
1599};
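// Illustrative note on the retainer contract above: during a scavenge, a
// from-space object that was copied has had its map word overwritten with a
// forwarding pointer, while a dead object still carries an ordinary map.
// RetainAs() therefore resolves weak list elements roughly like this:
//
//   object outside from-space      -> returned unchanged
//   from-space, forwarding address -> new location (to-space or old gen)
//   from-space, ordinary map word  -> NULL, element dropped from the list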
1600
1601
1602void Heap::Scavenge() {
Ben Murdochda12d292016-06-02 14:46:10 +01001603 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001604 RelocationLock relocation_lock(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001605 // There are soft limits in the allocation code, designed to trigger a
1606 // mark-sweep collection by failing allocations. There is no sense in trying
1607 // to trigger one during scavenge: scavenge allocations should always succeed.
1608 AlwaysAllocateScope scope(isolate());
1609
1610 // Bump-pointer allocations done during scavenge are not real allocations.
1611 // Pause the inline allocation steps.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001612 PauseAllocationObserversScope pause_observers(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001613
Ben Murdoch61f157c2016-09-16 13:49:30 +01001614 mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
1615
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001616#ifdef VERIFY_HEAP
1617 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1618#endif
1619
1620 gc_state_ = SCAVENGE;
1621
1622 // Implements Cheney's copying algorithm.
1623 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1624
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001625 // Used for updating survived_since_last_expansion_ at function end.
1626 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1627
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001628 scavenge_collector_->SelectScavengingVisitorsTable();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001629
Ben Murdoch61f157c2016-09-16 13:49:30 +01001630 if (UsingEmbedderHeapTracer()) {
1631 // Register found wrappers with embedder so it can add them to its marking
1632 // deque and correctly manage the case when v8 scavenger collects the
1633 // wrappers by either keeping wrappables alive, or cleaning marking deque.
1634 mark_compact_collector()->RegisterWrappersWithEmbedderHeapTracer();
1635 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001636
1637 // Flip the semispaces. After flipping, to space is empty, from space has
1638 // live objects.
1639 new_space_.Flip();
1640 new_space_.ResetAllocationInfo();
1641
1642 // We need to sweep newly copied objects which can be either in the
1643 // to space or promoted to the old generation. For to-space
1644 // objects, we treat the bottom of the to space as a queue. Newly
1645 // copied and unswept objects lie between a 'front' mark and the
1646 // allocation pointer.
1647 //
1648 // Promoted objects can go into various old-generation spaces, and
1649 // can be allocated internally in the spaces (from the free list).
1650 // We treat the top of the to space as a queue of addresses of
1651 // promoted objects. The addresses of newly promoted and unswept
1652 // objects lie between a 'front' mark and a 'rear' mark that is
1653 // updated as a side effect of promoting an object.
1654 //
1655 // There is guaranteed to be enough room at the top of the to space
1656 // for the addresses of promoted objects: every object promoted
1657 // frees up its size in bytes from the top of the new space, and
1658 // objects are at least one pointer in size.
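  // Illustrative layout of to-space during the scavenge (a sketch of the
  // description above, not a literal memory dump; addresses grow rightward):
  //
  //   ToSpaceStart()                                          ToSpaceEnd()
  //   | scanned copies | unscanned copies |  free   | promotion queue |
  //   ^                ^new_space_front   ^top()    ^rear_      front_^
  //
  // The promotion queue grows downwards from ToSpaceEnd() as objects are
  // promoted, while the scan front chases the allocation top.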
1659 Address new_space_front = new_space_.ToSpaceStart();
1660 promotion_queue_.Initialize();
1661
Ben Murdoch61f157c2016-09-16 13:49:30 +01001662 PromotionMode promotion_mode = CurrentPromotionMode();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001663 ScavengeVisitor scavenge_visitor(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001664
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001665 if (FLAG_scavenge_reclaim_unmodified_objects) {
1666 isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
1667 &IsUnmodifiedHeapObject);
1668 }
1669
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001670 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001671 // Copy roots.
Ben Murdochda12d292016-06-02 14:46:10 +01001672 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001673 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1674 }
1675
1676 {
1677 // Copy objects reachable from the old generation.
Ben Murdochda12d292016-06-02 14:46:10 +01001678 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
Ben Murdoch61f157c2016-09-16 13:49:30 +01001679 RememberedSet<OLD_TO_NEW>::Iterate(this, [this](Address addr) {
1680 return Scavenger::CheckAndScavengeObject(this, addr);
1681 });
1682
1683 RememberedSet<OLD_TO_NEW>::IterateTyped(
1684 this, [this](SlotType type, Address host_addr, Address addr) {
1685 return UpdateTypedSlotHelper::UpdateTypedSlot(
1686 isolate(), type, addr, [this](Object** addr) {
1687 // We expect that objects referenced by code are long-lived.
1688 // If we do not force promotion, then we need to clear
1689 // old_to_new slots in dead code objects after mark-compact.
1690 return Scavenger::CheckAndScavengeObject(
1691 this, reinterpret_cast<Address>(addr));
1692 });
1693 });
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001694 }
1695
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001696 {
Ben Murdochda12d292016-06-02 14:46:10 +01001697 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001698 // Copy objects reachable from the encountered weak collections list.
1699 scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1700 // Copy objects reachable from the encountered weak cells.
1701 scavenge_visitor.VisitPointer(&encountered_weak_cells_);
1702 }
1703
1704 {
1705 // Copy objects reachable from the code flushing candidates list.
Ben Murdochda12d292016-06-02 14:46:10 +01001706 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001707 MarkCompactCollector* collector = mark_compact_collector();
1708 if (collector->is_code_flushing_enabled()) {
1709 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001710 }
1711 }
1712
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001713 {
Ben Murdochda12d292016-06-02 14:46:10 +01001714 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
Ben Murdoch61f157c2016-09-16 13:49:30 +01001715 new_space_front =
1716 DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001717 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001718
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001719 if (FLAG_scavenge_reclaim_unmodified_objects) {
1720 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
1721 &IsUnscavengedHeapObject);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001722
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001723 isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
1724 &scavenge_visitor);
Ben Murdoch61f157c2016-09-16 13:49:30 +01001725 new_space_front =
1726 DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001727 } else {
Ben Murdochda12d292016-06-02 14:46:10 +01001728 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001729 while (isolate()->global_handles()->IterateObjectGroups(
1730 &scavenge_visitor, &IsUnscavengedHeapObject)) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01001731 new_space_front =
1732 DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001733 }
1734 isolate()->global_handles()->RemoveObjectGroups();
1735 isolate()->global_handles()->RemoveImplicitRefGroups();
1736
1737 isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1738 &IsUnscavengedHeapObject);
1739
1740 isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
1741 &scavenge_visitor);
Ben Murdoch61f157c2016-09-16 13:49:30 +01001742 new_space_front =
1743 DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001744 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001745
1746 UpdateNewSpaceReferencesInExternalStringTable(
1747 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1748
1749 promotion_queue_.Destroy();
1750
1751 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1752
1753 ScavengeWeakObjectRetainer weak_object_retainer(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001754 ProcessYoungWeakReferences(&weak_object_retainer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001755
1756 DCHECK(new_space_front == new_space_.top());
1757
1758 // Set age mark.
1759 new_space_.set_age_mark(new_space_.top());
1760
Ben Murdoch61f157c2016-09-16 13:49:30 +01001761 ArrayBufferTracker::FreeDeadInNewSpace(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001762
1763 // Update how much has survived scavenge.
1764 IncrementYoungSurvivorsCounter(static_cast<int>(
1765 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1766
1767 LOG(isolate_, ResourceEvent("scavenge", "end"));
1768
1769 gc_state_ = NOT_IN_GC;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001770}
1771
1772
1773String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1774 Object** p) {
1775 MapWord first_word = HeapObject::cast(*p)->map_word();
1776
1777 if (!first_word.IsForwardingAddress()) {
1778 // Unreachable external string can be finalized.
1779 heap->FinalizeExternalString(String::cast(*p));
1780 return NULL;
1781 }
1782
1783 // String is still reachable.
1784 return String::cast(first_word.ToForwardingAddress());
1785}
1786
1787
1788void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1789 ExternalStringTableUpdaterCallback updater_func) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001790 if (external_string_table_.new_space_strings_.is_empty()) return;
1791
1792 Object** start = &external_string_table_.new_space_strings_[0];
1793 Object** end = start + external_string_table_.new_space_strings_.length();
1794 Object** last = start;
1795
1796 for (Object** p = start; p < end; ++p) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001797 String* target = updater_func(this, p);
1798
1799 if (target == NULL) continue;
1800
1801 DCHECK(target->IsExternalString());
1802
1803 if (InNewSpace(target)) {
1804 // String is still in new space. Update the table entry.
1805 *last = target;
1806 ++last;
1807 } else {
1808 // String got promoted. Move it to the old string list.
1809 external_string_table_.AddOldString(target);
1810 }
1811 }
1812
1813 DCHECK(last <= end);
1814 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1815}
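// Illustrative sketch of the compaction idiom used above (generic shape, with
// ShouldKeep standing in for the real per-entry logic): a read pointer |p| and
// a write pointer |last| compact the list in place, and the list is then
// shrunk to the surviving prefix, which is what ShrinkNewStrings() does here:
//
//   Object** last = start;
//   for (Object** p = start; p < end; ++p) {
//     if (ShouldKeep(*p)) *last++ = *p;
//   }
//   // the new length is last - start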
1816
1817
1818void Heap::UpdateReferencesInExternalStringTable(
1819 ExternalStringTableUpdaterCallback updater_func) {
1820 // Update old space string references.
1821 if (external_string_table_.old_space_strings_.length() > 0) {
1822 Object** start = &external_string_table_.old_space_strings_[0];
1823 Object** end = start + external_string_table_.old_space_strings_.length();
1824 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1825 }
1826
1827 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1828}
1829
1830
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001831void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001832 ProcessNativeContexts(retainer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001833 ProcessAllocationSites(retainer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001834}
1835
1836
1837void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
1838 ProcessNativeContexts(retainer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001839}
1840
1841
1842void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1843 Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1844 // Update the head of the list of contexts.
1845 set_native_contexts_list(head);
1846}
1847
1848
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001849void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1850 Object* allocation_site_obj =
1851 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1852 set_allocation_sites_list(allocation_site_obj);
1853}
1854
Ben Murdochda12d292016-06-02 14:46:10 +01001855void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
1856 set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
1857 set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
1858}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001859
1860void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1861 DisallowHeapAllocation no_allocation_scope;
1862 Object* cur = allocation_sites_list();
1863 bool marked = false;
1864 while (cur->IsAllocationSite()) {
1865 AllocationSite* casted = AllocationSite::cast(cur);
1866 if (casted->GetPretenureMode() == flag) {
1867 casted->ResetPretenureDecision();
1868 casted->set_deopt_dependent_code(true);
1869 marked = true;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001870 RemoveAllocationSitePretenuringFeedback(casted);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001871 }
1872 cur = casted->weak_next();
1873 }
1874 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1875}
1876
1877
1878void Heap::EvaluateOldSpaceLocalPretenuring(
1879 uint64_t size_of_objects_before_gc) {
1880 uint64_t size_of_objects_after_gc = SizeOfObjects();
1881 double old_generation_survival_rate =
1882 (static_cast<double>(size_of_objects_after_gc) * 100) /
1883 static_cast<double>(size_of_objects_before_gc);
1884
1885 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1886 // Too many objects died in the old generation, pretenuring of wrong
1887 // allocation sites may be the cause for that. We have to deopt all
1888 // dependent code registered in the allocation sites to re-evaluate
1889 // our pretenuring decisions.
1890 ResetAllAllocationSitesDependentCode(TENURED);
1891 if (FLAG_trace_pretenuring) {
1892 PrintF(
1893 "Deopt all allocation sites dependent code due to low survival "
1894 "rate in the old generation %f\n",
1895 old_generation_survival_rate);
1896 }
1897 }
1898}
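// Worked example (illustrative numbers only): if the old generation held
// 100 MB of objects before this mark-compact and 60 MB afterwards, the
// survival rate computed above is 60%, which is not considered low, so the
// current pretenuring decisions are kept. Had only 8 MB survived (8%, below
// the assumed kOldSurvivalRateLowThreshold of 10), every allocation site with
// TENURED pretenuring would be reset and deoptimization of its dependent code
// requested.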
1899
1900
1901void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1902 DisallowHeapAllocation no_allocation;
1903 // All external strings are listed in the external string table.
1904
1905 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1906 public:
1907 explicit ExternalStringTableVisitorAdapter(
1908 v8::ExternalResourceVisitor* visitor)
1909 : visitor_(visitor) {}
1910 virtual void VisitPointers(Object** start, Object** end) {
1911 for (Object** p = start; p < end; p++) {
1912 DCHECK((*p)->IsExternalString());
1913 visitor_->VisitExternalString(
1914 Utils::ToLocal(Handle<String>(String::cast(*p))));
1915 }
1916 }
1917
1918 private:
1919 v8::ExternalResourceVisitor* visitor_;
1920 } external_string_table_visitor(visitor);
1921
1922 external_string_table_.Iterate(&external_string_table_visitor);
1923}
1924
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001925Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
Ben Murdoch61f157c2016-09-16 13:49:30 +01001926 Address new_space_front,
1927 PromotionMode promotion_mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001928 do {
1929 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1930 // The addresses new_space_front and new_space_.top() define a
1931 // queue of unprocessed copied objects. Process them until the
1932 // queue is empty.
1933 while (new_space_front != new_space_.top()) {
Ben Murdochc5610432016-08-08 18:44:38 +01001934 if (!Page::IsAlignedToPageSize(new_space_front)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001935 HeapObject* object = HeapObject::FromAddress(new_space_front);
Ben Murdoch61f157c2016-09-16 13:49:30 +01001936 if (promotion_mode == PROMOTE_MARKED) {
1937 new_space_front += StaticScavengeVisitor<PROMOTE_MARKED>::IterateBody(
1938 object->map(), object);
1939 } else {
1940 new_space_front +=
1941 StaticScavengeVisitor<DEFAULT_PROMOTION>::IterateBody(
1942 object->map(), object);
1943 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001944 } else {
Ben Murdochc5610432016-08-08 18:44:38 +01001945 new_space_front = Page::FromAllocationAreaAddress(new_space_front)
1946 ->next_page()
1947 ->area_start();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001948 }
1949 }
1950
1951 // Promote and process all the to-be-promoted objects.
1952 {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001953 while (!promotion_queue()->is_empty()) {
1954 HeapObject* target;
Ben Murdochda12d292016-06-02 14:46:10 +01001955 int32_t size;
1956 bool was_marked_black;
1957 promotion_queue()->remove(&target, &size, &was_marked_black);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001958
1959 // Promoted object might be already partially visited
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001960 // during old space pointer iteration. Thus we search specifically
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001961 // for pointers to from semispace instead of looking for pointers
1962 // to new space.
1963 DCHECK(!target->IsMap());
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001964
Ben Murdochda12d292016-06-02 14:46:10 +01001965 IteratePromotedObject(target, static_cast<int>(size), was_marked_black,
1966 &Scavenger::ScavengeObject);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001967 }
1968 }
1969
1970 // Take another spin if there are now unswept objects in new space
1971 // (there are currently no more unswept promoted objects).
1972 } while (new_space_front != new_space_.top());
1973
1974 return new_space_front;
1975}
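// Minimal sketch of the Cheney scan implemented above (illustrative only;
// promotion, page boundaries and the two visitor flavours are ignored): the
// region between the scan pointer and the allocation top is a FIFO work list,
// and visiting an object may copy its children, which bumps the top, so the
// loop ends once every copied object has been scanned.
//
//   Address scan = to_space_start;
//   while (scan != new_space->top()) {
//     HeapObject* object = HeapObject::FromAddress(scan);
//     object->Iterate(&scavenge_visitor);  // may copy children, bumping top
//     scan += object->Size();
//   }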
1976
1977
1978STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
1979 0); // NOLINT
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001980STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001981 0); // NOLINT
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001982#ifdef V8_HOST_ARCH_32_BIT
1983STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
1984 0); // NOLINT
1985#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001986
1987
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001988int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
1989 switch (alignment) {
1990 case kWordAligned:
1991 return 0;
1992 case kDoubleAligned:
1993 case kDoubleUnaligned:
1994 return kDoubleSize - kPointerSize;
1995 case kSimd128Unaligned:
1996 return kSimd128Size - kPointerSize;
1997 default:
1998 UNREACHABLE();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001999 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002000 return 0;
2001}
2002
2003
2004int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
2005 intptr_t offset = OffsetFrom(address);
2006 if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
2007 return kPointerSize;
2008 if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
2009 return kDoubleSize - kPointerSize; // No fill if double is always aligned.
2010 if (alignment == kSimd128Unaligned) {
2011 return (kSimd128Size - (static_cast<int>(offset) + kPointerSize)) &
2012 kSimd128AlignmentMask;
2013 }
2014 return 0;
2015}
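// Worked example (assuming a 32-bit host, i.e. kPointerSize == 4 and
// kDoubleSize == 8): for an allocation that would start at address 0x1004,
// kDoubleAligned needs 4 bytes of fill (0x1004 & 7 != 0) while
// kDoubleUnaligned needs none; at 0x1008 the situation is reversed.
// AlignWithFiller() below uses this to decide where the filler goes, while
// GetMaximumFillToAlign() above bounds how much slack a raw allocation must
// reserve for the worst case.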
2016
2017
2018HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
Ben Murdochda12d292016-06-02 14:46:10 +01002019 CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002020 return HeapObject::FromAddress(object->address() + filler_size);
2021}
2022
2023
2024HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
2025 int allocation_size,
2026 AllocationAlignment alignment) {
2027 int filler_size = allocation_size - object_size;
2028 DCHECK(filler_size > 0);
2029 int pre_filler = GetFillToAlign(object->address(), alignment);
2030 if (pre_filler) {
2031 object = PrecedeWithFiller(object, pre_filler);
2032 filler_size -= pre_filler;
2033 }
2034 if (filler_size)
Ben Murdochda12d292016-06-02 14:46:10 +01002035 CreateFillerObjectAt(object->address() + object_size, filler_size,
2036 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002037 return object;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002038}
2039
2040
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002041HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002042 return AlignWithFiller(object, size - kPointerSize, size, kDoubleAligned);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002043}
2044
2045
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002046void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01002047 ArrayBufferTracker::RegisterNew(this, buffer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002048}
2049
2050
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002051void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01002052 ArrayBufferTracker::Unregister(this, buffer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002053}
2054
2055
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002056void Heap::ConfigureInitialOldGenerationSize() {
2057 if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
2058 old_generation_allocation_limit_ =
2059 Max(kMinimumOldGenerationAllocationLimit,
2060 static_cast<intptr_t>(
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002061 static_cast<double>(old_generation_allocation_limit_) *
2062 (tracer()->AverageSurvivalRatio() / 100)));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002063 }
2064}
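// Worked example (illustrative numbers only): with an initial
// old_generation_allocation_limit_ of 700 MB and an average survival ratio of
// 40%, the code above reconfigures the limit to
// max(kMinimumOldGenerationAllocationLimit, 700 MB * 0.4) = 280 MB (assuming
// the minimum is smaller). This adjustment keeps being applied after each GC
// until the first mark-compact sets old_generation_size_configured_.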
2065
2066
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002067AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2068 int instance_size) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002069 Object* result = nullptr;
2070 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002071 if (!allocation.To(&result)) return allocation;
2072
2073 // Map::cast cannot be used due to uninitialized map field.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002074 reinterpret_cast<Map*>(result)->set_map(
2075 reinterpret_cast<Map*>(root(kMetaMapRootIndex)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002076 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2077 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002078 // Initialize to only containing tagged fields.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002079 reinterpret_cast<Map*>(result)->set_visitor_id(
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002080 StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
2081 if (FLAG_unbox_double_fields) {
2082 reinterpret_cast<Map*>(result)
2083 ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2084 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002085 reinterpret_cast<Map*>(result)->clear_unused();
2086 reinterpret_cast<Map*>(result)
2087 ->set_inobject_properties_or_constructor_function_index(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002088 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2089 reinterpret_cast<Map*>(result)->set_bit_field(0);
2090 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2091 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002092 Map::OwnsDescriptors::encode(true) |
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002093 Map::ConstructionCounter::encode(Map::kNoSlackTracking);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002094 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002095 reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::FromInt(0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002096 return result;
2097}
2098
2099
2100AllocationResult Heap::AllocateMap(InstanceType instance_type,
2101 int instance_size,
2102 ElementsKind elements_kind) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002103 HeapObject* result = nullptr;
2104 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002105 if (!allocation.To(&result)) return allocation;
2106
Ben Murdoch097c5b22016-05-18 11:27:45 +01002107 isolate()->counters()->maps_created()->Increment();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002108 result->set_map_no_write_barrier(meta_map());
2109 Map* map = Map::cast(result);
2110 map->set_instance_type(instance_type);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002111 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002112 map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002113 map->set_instance_size(instance_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002114 map->clear_unused();
2115 map->set_inobject_properties_or_constructor_function_index(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002116 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2117 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2118 SKIP_WRITE_BARRIER);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002119 map->set_weak_cell_cache(Smi::FromInt(0));
2120 map->set_raw_transitions(Smi::FromInt(0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002121 map->set_unused_property_fields(0);
2122 map->set_instance_descriptors(empty_descriptor_array());
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002123 if (FLAG_unbox_double_fields) {
2124 map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2125 }
2126 // Must be called only after |instance_type|, |instance_size| and
2127 // |layout_descriptor| are set.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002128 map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002129 map->set_bit_field(0);
2130 map->set_bit_field2(1 << Map::kIsExtensible);
2131 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002132 Map::OwnsDescriptors::encode(true) |
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002133 Map::ConstructionCounter::encode(Map::kNoSlackTracking);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002134 map->set_bit_field3(bit_field3);
2135 map->set_elements_kind(elements_kind);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002136 map->set_new_target_is_base(true);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002137
2138 return map;
2139}
2140
2141
2142AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2143 AllocationSpace space) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002144 HeapObject* obj = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002145 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002146 AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
2147 AllocationResult allocation = AllocateRaw(size, space, align);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002148 if (!allocation.To(&obj)) return allocation;
2149 }
2150#ifdef DEBUG
2151 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2152 DCHECK(chunk->owner()->identity() == space);
2153#endif
Ben Murdochda12d292016-06-02 14:46:10 +01002154 CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002155 return obj;
2156}
2157
2158
2159const Heap::StringTypeTable Heap::string_type_table[] = {
2160#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2161 { type, size, k##camel_name##MapRootIndex } \
2162 ,
2163 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2164#undef STRING_TYPE_ELEMENT
2165};
2166
2167
2168const Heap::ConstantStringTable Heap::constant_string_table[] = {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002169 {"", kempty_stringRootIndex},
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002170#define CONSTANT_STRING_ELEMENT(name, contents) \
2171 { contents, k##name##RootIndex } \
2172 ,
2173 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2174#undef CONSTANT_STRING_ELEMENT
2175};
2176
2177
2178const Heap::StructTable Heap::struct_table[] = {
2179#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2180 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2181 ,
2182 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2183#undef STRUCT_TABLE_ELEMENT
2184};
2185
Ben Murdochc5610432016-08-08 18:44:38 +01002186namespace {
2187
2188void FinalizePartialMap(Heap* heap, Map* map) {
2189 map->set_code_cache(heap->empty_fixed_array());
2190 map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
2191 map->set_raw_transitions(Smi::FromInt(0));
2192 map->set_instance_descriptors(heap->empty_descriptor_array());
2193 if (FLAG_unbox_double_fields) {
2194 map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2195 }
2196 map->set_prototype(heap->null_value());
2197 map->set_constructor_or_backpointer(heap->null_value());
2198}
2199
2200} // namespace
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002201
2202bool Heap::CreateInitialMaps() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002203 HeapObject* obj = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002204 {
2205 AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2206 if (!allocation.To(&obj)) return false;
2207 }
2208 // Map::cast cannot be used due to uninitialized map field.
2209 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2210 set_meta_map(new_meta_map);
2211 new_meta_map->set_map(new_meta_map);
2212
2213 { // Partial map allocation
2214#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2215 { \
2216 Map* map; \
2217 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2218 set_##field_name##_map(map); \
2219 }
2220
2221 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
Ben Murdochc5610432016-08-08 18:44:38 +01002222 fixed_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002223 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2224 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
Ben Murdochc5610432016-08-08 18:44:38 +01002225 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002226
2227#undef ALLOCATE_PARTIAL_MAP
2228 }
2229
2230 // Allocate the empty array.
2231 {
2232 AllocationResult allocation = AllocateEmptyFixedArray();
2233 if (!allocation.To(&obj)) return false;
2234 }
2235 set_empty_fixed_array(FixedArray::cast(obj));
2236
2237 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002238 AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002239 if (!allocation.To(&obj)) return false;
2240 }
2241 set_null_value(Oddball::cast(obj));
2242 Oddball::cast(obj)->set_kind(Oddball::kNull);
2243
2244 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002245 AllocationResult allocation = Allocate(undefined_map(), OLD_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002246 if (!allocation.To(&obj)) return false;
2247 }
2248 set_undefined_value(Oddball::cast(obj));
2249 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2250 DCHECK(!InNewSpace(undefined_value()));
Ben Murdochc5610432016-08-08 18:44:38 +01002251 {
2252 AllocationResult allocation = Allocate(the_hole_map(), OLD_SPACE);
2253 if (!allocation.To(&obj)) return false;
2254 }
2255 set_the_hole_value(Oddball::cast(obj));
2256 Oddball::cast(obj)->set_kind(Oddball::kTheHole);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002257
2258 // Set preliminary exception sentinel value before actually initializing it.
2259 set_exception(null_value());
2260
2261 // Allocate the empty descriptor array.
2262 {
2263 AllocationResult allocation = AllocateEmptyFixedArray();
2264 if (!allocation.To(&obj)) return false;
2265 }
2266 set_empty_descriptor_array(DescriptorArray::cast(obj));
2267
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002268 // Fix the instance_descriptors for the existing maps.
Ben Murdochc5610432016-08-08 18:44:38 +01002269 FinalizePartialMap(this, meta_map());
2270 FinalizePartialMap(this, fixed_array_map());
2271 FinalizePartialMap(this, undefined_map());
Ben Murdoch097c5b22016-05-18 11:27:45 +01002272 undefined_map()->set_is_undetectable();
Ben Murdochc5610432016-08-08 18:44:38 +01002273 FinalizePartialMap(this, null_map());
2274 null_map()->set_is_undetectable();
2275 FinalizePartialMap(this, the_hole_map());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002276
2277 { // Map allocation
2278#define ALLOCATE_MAP(instance_type, size, field_name) \
2279 { \
2280 Map* map; \
2281 if (!AllocateMap((instance_type), size).To(&map)) return false; \
2282 set_##field_name##_map(map); \
2283 }
2284
2285#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2286 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2287
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002288#define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
2289 constructor_function_index) \
2290 { \
2291 ALLOCATE_MAP((instance_type), (size), field_name); \
2292 field_name##_map()->SetConstructorFunctionIndex( \
2293 (constructor_function_index)); \
2294 }
2295
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002296 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
Ben Murdochc5610432016-08-08 18:44:38 +01002297 fixed_cow_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
2298 DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002299
2300 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002301 ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
2302 Context::NUMBER_FUNCTION_INDEX)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002303 ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2304 mutable_heap_number)
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002305 ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
2306 Context::SYMBOL_FUNCTION_INDEX)
2307#define ALLOCATE_SIMD128_MAP(TYPE, Type, type, lane_count, lane_type) \
2308 ALLOCATE_PRIMITIVE_MAP(SIMD128_VALUE_TYPE, Type::kSize, type, \
2309 Context::TYPE##_FUNCTION_INDEX)
2310 SIMD128_TYPES(ALLOCATE_SIMD128_MAP)
2311#undef ALLOCATE_SIMD128_MAP
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002312 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2313
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002314 ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
2315 Context::BOOLEAN_FUNCTION_INDEX);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002316 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2317 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2318 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2319 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2320 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
Ben Murdochda12d292016-06-02 14:46:10 +01002321 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
Ben Murdochc5610432016-08-08 18:44:38 +01002322 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002323
2324 for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2325 const StringTypeTable& entry = string_type_table[i];
2326 {
2327 AllocationResult allocation = AllocateMap(entry.type, entry.size);
2328 if (!allocation.To(&obj)) return false;
2329 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002330 Map* map = Map::cast(obj);
2331 map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002332 // Mark cons string maps as unstable, because their objects can change
2333 // maps during GC.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002334 if (StringShape(entry.type).IsCons()) map->mark_unstable();
2335 roots_[entry.index] = map;
2336 }
2337
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002338 { // Create a separate external one byte string map for native sources.
Ben Murdoch61f157c2016-09-16 13:49:30 +01002339 AllocationResult allocation =
2340 AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
2341 ExternalOneByteString::kShortSize);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002342 if (!allocation.To(&obj)) return false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002343 Map* map = Map::cast(obj);
2344 map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
2345 set_native_source_string_map(map);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002346 }
2347
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002348 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
Ben Murdochc5610432016-08-08 18:44:38 +01002349 fixed_double_array_map()->set_elements_kind(FAST_HOLEY_DOUBLE_ELEMENTS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002350 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002351 ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002352 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2353
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002354#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2355 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2356
2357 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2358#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2359
2360 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2361
2362 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2363
2364 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2365 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002366 ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002367 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2368 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2369
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002370 ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002371
2372 for (unsigned i = 0; i < arraysize(struct_table); i++) {
2373 const StructTable& entry = struct_table[i];
2374 Map* map;
2375 if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2376 roots_[entry.index] = map;
2377 }
2378
2379 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2380 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2381
2382 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2383 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2384 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
Ben Murdochda12d292016-06-02 14:46:10 +01002385 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002386 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2387 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002388 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
2389 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002390
2391 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2392 native_context_map()->set_dictionary_map(true);
2393 native_context_map()->set_visitor_id(
2394 StaticVisitorBase::kVisitNativeContext);
2395
2396 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2397 shared_function_info)
2398
2399 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2400 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2401 external_map()->set_is_extensible(false);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002402#undef ALLOCATE_PRIMITIVE_MAP
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002403#undef ALLOCATE_VARSIZE_MAP
2404#undef ALLOCATE_MAP
2405 }
2406
Ben Murdochda12d292016-06-02 14:46:10 +01002407 {
2408 AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
2409 if (!allocation.To(&obj)) return false;
2410 }
2411 set_true_value(Oddball::cast(obj));
2412 Oddball::cast(obj)->set_kind(Oddball::kTrue);
2413
2414 {
2415 AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
2416 if (!allocation.To(&obj)) return false;
2417 }
2418 set_false_value(Oddball::cast(obj));
2419 Oddball::cast(obj)->set_kind(Oddball::kFalse);
2420
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002421 { // Empty arrays
2422 {
2423 ByteArray* byte_array;
2424 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2425 set_empty_byte_array(byte_array);
2426 }
2427
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002428#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2429 { \
2430 FixedTypedArrayBase* obj; \
2431 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2432 return false; \
2433 set_empty_fixed_##type##_array(obj); \
2434 }
2435
2436 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2437#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2438 }
2439 DCHECK(!InNewSpace(empty_fixed_array()));
2440 return true;
2441}
2442
2443
2444AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2445 PretenureFlag pretenure) {
2446 // Statically ensure that it is safe to allocate heap numbers in paged
2447 // spaces.
2448 int size = HeapNumber::kSize;
2449 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2450
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002451 AllocationSpace space = SelectSpace(pretenure);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002452
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002453 HeapObject* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002454 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002455 AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002456 if (!allocation.To(&result)) return allocation;
2457 }
2458
2459 Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2460 HeapObject::cast(result)->set_map_no_write_barrier(map);
2461 HeapNumber::cast(result)->set_value(value);
2462 return result;
2463}
2464
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002465#define SIMD_ALLOCATE_DEFINITION(TYPE, Type, type, lane_count, lane_type) \
2466 AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count], \
2467 PretenureFlag pretenure) { \
2468 int size = Type::kSize; \
2469 STATIC_ASSERT(Type::kSize <= Page::kMaxRegularHeapObjectSize); \
2470 \
2471 AllocationSpace space = SelectSpace(pretenure); \
2472 \
2473 HeapObject* result = nullptr; \
2474 { \
2475 AllocationResult allocation = \
2476 AllocateRaw(size, space, kSimd128Unaligned); \
2477 if (!allocation.To(&result)) return allocation; \
2478 } \
2479 \
2480 result->set_map_no_write_barrier(type##_map()); \
2481 Type* instance = Type::cast(result); \
2482 for (int i = 0; i < lane_count; i++) { \
2483 instance->set_lane(i, lanes[i]); \
2484 } \
2485 return result; \
2486 }
2487SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)
2488#undef SIMD_ALLOCATE_DEFINITION
2489
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002490
2491AllocationResult Heap::AllocateCell(Object* value) {
2492 int size = Cell::kSize;
2493 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2494
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002495 HeapObject* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002496 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002497 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002498 if (!allocation.To(&result)) return allocation;
2499 }
2500 result->set_map_no_write_barrier(cell_map());
2501 Cell::cast(result)->set_value(value);
2502 return result;
2503}
2504
2505
2506AllocationResult Heap::AllocatePropertyCell() {
2507 int size = PropertyCell::kSize;
2508 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2509
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002510 HeapObject* result = nullptr;
2511 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002512 if (!allocation.To(&result)) return allocation;
2513
2514 result->set_map_no_write_barrier(global_property_cell_map());
2515 PropertyCell* cell = PropertyCell::cast(result);
2516 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2517 SKIP_WRITE_BARRIER);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002518 cell->set_property_details(PropertyDetails(Smi::FromInt(0)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002519 cell->set_value(the_hole_value());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002520 return result;
2521}
2522
2523
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002524AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
2525 int size = WeakCell::kSize;
2526 STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002527 HeapObject* result = nullptr;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002528 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002529 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002530 if (!allocation.To(&result)) return allocation;
2531 }
2532 result->set_map_no_write_barrier(weak_cell_map());
2533 WeakCell::cast(result)->initialize(value);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002534 WeakCell::cast(result)->clear_next(the_hole_value());
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002535 return result;
2536}
2537
2538
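// Transition arrays are always allocated tenured; see the comment below on
// how they interact with black allocation during incremental marking.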
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002539AllocationResult Heap::AllocateTransitionArray(int capacity) {
2540 DCHECK(capacity > 0);
2541 HeapObject* raw_array = nullptr;
2542 {
2543 AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
2544 if (!allocation.To(&raw_array)) return allocation;
2545 }
2546 raw_array->set_map_no_write_barrier(transition_array_map());
2547 TransitionArray* array = TransitionArray::cast(raw_array);
2548 array->set_length(capacity);
2549 MemsetPointer(array->data_start(), undefined_value(), capacity);
Ben Murdochda12d292016-06-02 14:46:10 +01002550 // Transition arrays are tenured. When black allocation is on we have to
2551 // add the transition array to the list of encountered_transition_arrays.
2552 if (incremental_marking()->black_allocation()) {
2553 array->set_next_link(encountered_transition_arrays(),
2554 UPDATE_WEAK_WRITE_BARRIER);
2555 set_encountered_transition_arrays(array);
2556 } else {
2557 array->set_next_link(undefined_value(), SKIP_WRITE_BARRIER);
2558 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002559 return array;
2560}
2561
2562
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002563void Heap::CreateApiObjects() {
2564 HandleScope scope(isolate());
2565 Factory* factory = isolate()->factory();
2566 Handle<Map> new_neander_map =
2567 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2568
2569 // Don't use Smi-only elements optimizations for objects with the neander
2570 // map. There are too many cases where element values are set directly with a
2571 // bottleneck to trap the Smi-only -> fast elements transition, and there
2572 // appears to be no benefit in optimizing this case.
2573 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2574 set_neander_map(*new_neander_map);
2575
2576 Handle<JSObject> listeners = factory->NewNeanderObject();
2577 Handle<FixedArray> elements = factory->NewFixedArray(2);
2578 elements->set(0, Smi::FromInt(0));
2579 listeners->set_elements(*elements);
2580 set_message_listeners(*listeners);
2581}
2582
2583
2584void Heap::CreateJSEntryStub() {
2585 JSEntryStub stub(isolate(), StackFrame::ENTRY);
2586 set_js_entry_code(*stub.GetCode());
2587}
2588
2589
2590void Heap::CreateJSConstructEntryStub() {
2591 JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2592 set_js_construct_entry_code(*stub.GetCode());
2593}
2594
2595
2596void Heap::CreateFixedStubs() {
2597 // Here we create roots for fixed stubs. They are needed at GC
2598 // for cooking and uncooking (check out frames.cc).
2599 // This eliminates the need for doing dictionary lookup in the
2600 // stub cache for these stubs.
2601 HandleScope scope(isolate());
2602
2603 // Create stubs that should be there, so we don't unexpectedly have to
2604 // create them if we need them during the creation of another stub.
2605 // Stub creation mixes raw pointers and handles in an unsafe manner so
2606 // we cannot create stubs while we are creating stubs.
2607 CodeStub::GenerateStubsAheadOfTime(isolate());
2608
2609 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2610 // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2611 // is created.
2612
2613 // gcc-4.4 has a problem generating correct code for the following snippet:
2614 // { JSEntryStub stub;
2615 // js_entry_code_ = *stub.GetCode();
2616 // }
2617 // { JSConstructEntryStub stub;
2618 // js_construct_entry_code_ = *stub.GetCode();
2619 // }
2620 // To work around the problem, make separate functions without inlining.
2621 Heap::CreateJSEntryStub();
2622 Heap::CreateJSConstructEntryStub();
2623}
2624
2625
2626void Heap::CreateInitialObjects() {
2627 HandleScope scope(isolate());
2628 Factory* factory = isolate()->factory();
2629
2630 // The -0 value must be set before NewNumber works.
2631 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2632 DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2633
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002634 set_nan_value(*factory->NewHeapNumber(
2635 std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002636 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002637 set_minus_infinity_value(
2638 *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002639
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002640 // Allocate initial string table.
2641 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2642
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002645 // Finish initializing oddballs after creating the string table.
2646 Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
Ben Murdochda12d292016-06-02 14:46:10 +01002647 factory->nan_value(), false, "undefined",
2648 Oddball::kUndefined);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002649
2650 // Initialize the null_value.
2651 Oddball::Initialize(isolate(), factory->null_value(), "null",
Ben Murdochda12d292016-06-02 14:46:10 +01002652 handle(Smi::FromInt(0), isolate()), false, "object",
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002653 Oddball::kNull);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002654
Ben Murdochc5610432016-08-08 18:44:38 +01002655 // Initialize the_hole_value.
2656 Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
2657 handle(Smi::FromInt(-1), isolate()), false, "undefined",
2658 Oddball::kTheHole);
2659
Ben Murdochda12d292016-06-02 14:46:10 +01002660 // Initialize the true_value.
2661 Oddball::Initialize(isolate(), factory->true_value(), "true",
2662 handle(Smi::FromInt(1), isolate()), true, "boolean",
2663 Oddball::kTrue);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002664
Ben Murdochda12d292016-06-02 14:46:10 +01002665 // Initialize the false_value.
2666 Oddball::Initialize(isolate(), factory->false_value(), "false",
2667 handle(Smi::FromInt(0), isolate()), false, "boolean",
2668 Oddball::kFalse);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002669
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002670 set_uninitialized_value(
2671 *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
Ben Murdochda12d292016-06-02 14:46:10 +01002672 handle(Smi::FromInt(-1), isolate()), false,
2673 "undefined", Oddball::kUninitialized));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002674
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002675 set_arguments_marker(
2676 *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
Ben Murdochda12d292016-06-02 14:46:10 +01002677 handle(Smi::FromInt(-4), isolate()), false,
2678 "undefined", Oddball::kArgumentsMarker));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002679
2680 set_no_interceptor_result_sentinel(*factory->NewOddball(
2681 factory->no_interceptor_result_sentinel_map(),
2682 "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
Ben Murdochda12d292016-06-02 14:46:10 +01002683 false, "undefined", Oddball::kOther));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002684
2685 set_termination_exception(*factory->NewOddball(
2686 factory->termination_exception_map(), "termination_exception",
Ben Murdochda12d292016-06-02 14:46:10 +01002687 handle(Smi::FromInt(-3), isolate()), false, "undefined",
2688 Oddball::kOther));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002689
2690 set_exception(*factory->NewOddball(factory->exception_map(), "exception",
Ben Murdochda12d292016-06-02 14:46:10 +01002691 handle(Smi::FromInt(-5), isolate()), false,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002692 "undefined", Oddball::kException));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002693
Ben Murdochda12d292016-06-02 14:46:10 +01002694 set_optimized_out(
2695 *factory->NewOddball(factory->optimized_out_map(), "optimized_out",
2696 handle(Smi::FromInt(-6), isolate()), false,
2697 "undefined", Oddball::kOptimizedOut));
2698
Ben Murdochc5610432016-08-08 18:44:38 +01002699 set_stale_register(
2700 *factory->NewOddball(factory->stale_register_map(), "stale_register",
2701 handle(Smi::FromInt(-7), isolate()), false,
2702 "undefined", Oddball::kStaleRegister));
2703
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002704 for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
2705 Handle<String> str =
2706 factory->InternalizeUtf8String(constant_string_table[i].contents);
2707 roots_[constant_string_table[i].index] = *str;
2708 }
2709
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002710 // Create the code_stubs dictionary. The initial size is set to avoid
2711 // expanding the dictionary during bootstrapping.
2712 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2713
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002714 set_instanceof_cache_function(Smi::FromInt(0));
2715 set_instanceof_cache_map(Smi::FromInt(0));
2716 set_instanceof_cache_answer(Smi::FromInt(0));
2717
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002718 {
2719 HandleScope scope(isolate());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002720#define SYMBOL_INIT(name) \
2721 { \
2722 Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
2723 Handle<Symbol> symbol(isolate()->factory()->NewPrivateSymbol()); \
2724 symbol->set_name(*name##d); \
2725 roots_[k##name##RootIndex] = *symbol; \
2726 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002727 PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
2728#undef SYMBOL_INIT
2729 }
2730
2731 {
2732 HandleScope scope(isolate());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002733#define SYMBOL_INIT(name, description) \
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002734 Handle<Symbol> name = factory->NewSymbol(); \
2735 Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
2736 name->set_name(*name##d); \
2737 roots_[k##name##RootIndex] = *name;
2738 PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
2739#undef SYMBOL_INIT
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002740
2741#define SYMBOL_INIT(name, description) \
2742 Handle<Symbol> name = factory->NewSymbol(); \
2743 Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
2744 name->set_is_well_known_symbol(true); \
2745 name->set_name(*name##d); \
2746 roots_[k##name##RootIndex] = *name;
2747 WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
2748#undef SYMBOL_INIT
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002749 }
2750
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002751 // Allocate the dictionary of intrinsic function names.
2752 Handle<NameDictionary> intrinsic_names =
2753 NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
2754 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2755 set_intrinsic_function_names(*intrinsic_names);
2756
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002757 Handle<NameDictionary> empty_properties_dictionary =
2758 NameDictionary::New(isolate(), 0, TENURED);
2759 empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
2760 set_empty_properties_dictionary(*empty_properties_dictionary);
2761
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002762 set_number_string_cache(
2763 *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
2764
2765 // Allocate cache for single character one byte strings.
2766 set_single_character_string_cache(
2767 *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
2768
2769 // Allocate cache for string split and regexp-multiple.
2770 set_string_split_cache(*factory->NewFixedArray(
2771 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2772 set_regexp_multiple_cache(*factory->NewFixedArray(
2773 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2774
2775 // Allocate cache for external strings pointing to native source code.
2776 set_natives_source_cache(
2777 *factory->NewFixedArray(Natives::GetBuiltinsCount()));
2778
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002779 set_experimental_natives_source_cache(
2780 *factory->NewFixedArray(ExperimentalNatives::GetBuiltinsCount()));
2781
2782 set_extra_natives_source_cache(
2783 *factory->NewFixedArray(ExtraNatives::GetBuiltinsCount()));
2784
2785 set_experimental_extra_natives_source_cache(
2786 *factory->NewFixedArray(ExperimentalExtraNatives::GetBuiltinsCount()));
2787
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002788 set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2789
2790 // The symbol registry is initialized lazily.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002791 set_symbol_registry(Smi::FromInt(0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002792
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002793 // Microtask queue uses the empty fixed array as a sentinel for "empty".
2794 // Number of queued microtasks stored in Isolate::pending_microtask_count().
2795 set_microtask_queue(empty_fixed_array());
2796
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002797 {
2798 StaticFeedbackVectorSpec spec;
2799 FeedbackVectorSlot load_ic_slot = spec.AddLoadICSlot();
2800 FeedbackVectorSlot keyed_load_ic_slot = spec.AddKeyedLoadICSlot();
2801 FeedbackVectorSlot store_ic_slot = spec.AddStoreICSlot();
2802 FeedbackVectorSlot keyed_store_ic_slot = spec.AddKeyedStoreICSlot();
2803
2804 DCHECK_EQ(load_ic_slot,
2805 FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
2806 DCHECK_EQ(keyed_load_ic_slot,
2807 FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
2808 DCHECK_EQ(store_ic_slot,
2809 FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
2810 DCHECK_EQ(keyed_store_ic_slot,
2811 FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
2812
2813 Handle<TypeFeedbackMetadata> dummy_metadata =
2814 TypeFeedbackMetadata::New(isolate(), &spec);
2815 Handle<TypeFeedbackVector> dummy_vector =
2816 TypeFeedbackVector::New(isolate(), dummy_metadata);
2817
2818 Object* megamorphic = *TypeFeedbackVector::MegamorphicSentinel(isolate());
2819 dummy_vector->Set(load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
2820 dummy_vector->Set(keyed_load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
2821 dummy_vector->Set(store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
2822 dummy_vector->Set(keyed_store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
2823
2824 set_dummy_vector(*dummy_vector);
2825 }
2826
2827 {
Ben Murdoch61f157c2016-09-16 13:49:30 +01002828 Handle<FixedArray> empty_literals_array =
2829 factory->NewFixedArray(1, TENURED);
2830 empty_literals_array->set(0, *factory->empty_fixed_array());
2831 set_empty_literals_array(*empty_literals_array);
2832 }
2833
2834 {
2835 Handle<FixedArray> empty_sloppy_arguments_elements =
2836 factory->NewFixedArray(2, TENURED);
2837 empty_sloppy_arguments_elements->set_map(sloppy_arguments_elements_map());
2838 set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
2839 }
2840
2841 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002842 Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
2843 set_empty_weak_cell(*cell);
2844 cell->clear();
2845
2846 Handle<FixedArray> cleared_optimized_code_map =
2847 factory->NewFixedArray(SharedFunctionInfo::kEntriesStart, TENURED);
2848 cleared_optimized_code_map->set(SharedFunctionInfo::kSharedCodeIndex,
2849 *cell);
2850 STATIC_ASSERT(SharedFunctionInfo::kEntriesStart == 1 &&
2851 SharedFunctionInfo::kSharedCodeIndex == 0);
2852 set_cleared_optimized_code_map(*cleared_optimized_code_map);
2853 }
2854
2855 set_detached_contexts(empty_fixed_array());
2856 set_retained_maps(ArrayList::cast(empty_fixed_array()));
2857
2858 set_weak_object_to_code_table(
2859 *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
2860 TENURED));
2861
2862 set_script_list(Smi::FromInt(0));
2863
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002864 Handle<SeededNumberDictionary> slow_element_dictionary =
2865 SeededNumberDictionary::New(isolate(), 0, TENURED);
2866 slow_element_dictionary->set_requires_slow_elements();
2867 set_empty_slow_element_dictionary(*slow_element_dictionary);
2868
2869 set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2870
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002871 // Handling of script id generation is in Heap::NextScriptId().
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002872 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
Ben Murdoch61f157c2016-09-16 13:49:30 +01002873 set_next_template_serial_number(Smi::FromInt(0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002874
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002875 // Allocate the empty script.
2876 Handle<Script> script = factory->NewScript(factory->empty_string());
2877 script->set_type(Script::TYPE_NATIVE);
2878 set_empty_script(*script);
2879
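  // The protector cells below start out "valid"; invalidating a protector is
  // how the runtime signals that a fast-path assumption (e.g. about Array
  // behavior) no longer holds, so dependent fast paths and optimized code can
  // be invalidated.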
2880 Handle<PropertyCell> cell = factory->NewPropertyCell();
2881 cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
2882 set_array_protector(*cell);
2883
2884 cell = factory->NewPropertyCell();
2885 cell->set_value(the_hole_value());
2886 set_empty_property_cell(*cell);
2887
Ben Murdochc5610432016-08-08 18:44:38 +01002888 cell = factory->NewPropertyCell();
2889 cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
2890 set_has_instance_protector(*cell);
2891
2892 Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
2893 handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
2894 set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
2895
2896 Handle<Cell> species_cell = factory->NewCell(
2897 handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
Ben Murdoch097c5b22016-05-18 11:27:45 +01002898 set_species_protector(*species_cell);
2899
Ben Murdoch61f157c2016-09-16 13:49:30 +01002900 set_serialized_templates(empty_fixed_array());
2901
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002902 set_weak_stack_trace_list(Smi::FromInt(0));
2903
2904 set_noscript_shared_function_infos(Smi::FromInt(0));
2905
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002906 // Initialize keyed lookup cache.
2907 isolate_->keyed_lookup_cache()->Clear();
2908
2909 // Initialize context slot cache.
2910 isolate_->context_slot_cache()->Clear();
2911
2912 // Initialize descriptor cache.
2913 isolate_->descriptor_lookup_cache()->Clear();
2914
2915 // Initialize compilation cache.
2916 isolate_->compilation_cache()->Clear();
Ben Murdochc5610432016-08-08 18:44:38 +01002917
2918 CreateFixedStubs();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002919}
2920
2921
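// Returns true for roots that are deliberately overwritten after
// bootstrapping (caches, growing lists, Smi-valued counters and the string
// table); all other roots stay constant once the heap is set up.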
2922bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002923 switch (root_index) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002924 case kNumberStringCacheRootIndex:
2925 case kInstanceofCacheFunctionRootIndex:
2926 case kInstanceofCacheMapRootIndex:
2927 case kInstanceofCacheAnswerRootIndex:
2928 case kCodeStubsRootIndex:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002929 case kEmptyScriptRootIndex:
2930 case kSymbolRegistryRootIndex:
2931 case kScriptListRootIndex:
2932 case kMaterializedObjectsRootIndex:
2933 case kMicrotaskQueueRootIndex:
2934 case kDetachedContextsRootIndex:
2935 case kWeakObjectToCodeTableRootIndex:
2936 case kRetainedMapsRootIndex:
2937 case kNoScriptSharedFunctionInfosRootIndex:
2938 case kWeakStackTraceListRootIndex:
Ben Murdoch61f157c2016-09-16 13:49:30 +01002939 case kSerializedTemplatesRootIndex:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002940// Smi values
2941#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
2942 SMI_ROOT_LIST(SMI_ENTRY)
2943#undef SMI_ENTRY
2944 // String table
2945 case kStringTableRootIndex:
2946 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002947
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002948 default:
2949 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002950 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002951}
2952
2953
2954bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2955 return !RootCanBeWrittenAfterInitialization(root_index) &&
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002956 !InNewSpace(root(root_index));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002957}
2958
2959
2960int Heap::FullSizeNumberStringCacheLength() {
2961 // Compute the size of the number string cache based on the max new space size.
2962 // The number string cache has a minimum size based on twice the initial cache
2963 // size to ensure that it is bigger after being made 'full size'.
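  // Worked example (assuming max_semi_space_size_ is in bytes): with an 8 MB
  // max semi-space, 8 MB / 512 = 16384 entries, which the Min/Max clamps leave
  // unchanged, so the returned length is 2 * 16384 = 32768 slots.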
2964 int number_string_cache_size = max_semi_space_size_ / 512;
2965 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
2966 Min(0x4000, number_string_cache_size));
2967 // There is a string and a number per entry so the length is twice the number
2968 // of entries.
2969 return number_string_cache_size * 2;
2970}
2971
2972
2973void Heap::FlushNumberStringCache() {
2974 // Flush the number to string cache.
2975 int len = number_string_cache()->length();
2976 for (int i = 0; i < len; i++) {
2977 number_string_cache()->set_undefined(i);
2978 }
2979}
2980
2981
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002982Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
2983 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
2984}
2985
2986
2987Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
2988 ExternalArrayType array_type) {
2989 switch (array_type) {
2990#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
2991 case kExternal##Type##Array: \
2992 return kFixed##Type##ArrayMapRootIndex;
2993
2994 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
2995#undef ARRAY_TYPE_TO_ROOT_INDEX
2996
2997 default:
2998 UNREACHABLE();
2999 return kUndefinedValueRootIndex;
3000 }
3001}
3002
3003
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003004Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3005 ElementsKind elementsKind) {
3006 switch (elementsKind) {
3007#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3008 case TYPE##_ELEMENTS: \
3009 return kEmptyFixed##Type##ArrayRootIndex;
3010
3011 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3012#undef ELEMENT_KIND_TO_ROOT_INDEX
3013 default:
3014 UNREACHABLE();
3015 return kUndefinedValueRootIndex;
3016 }
3017}
3018
3019
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003020FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3021 return FixedTypedArrayBase::cast(
3022 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3023}
3024
3025
3026AllocationResult Heap::AllocateForeign(Address address,
3027 PretenureFlag pretenure) {
3028 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3029 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003030 AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
3031 Foreign* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003032 AllocationResult allocation = Allocate(foreign_map(), space);
3033 if (!allocation.To(&result)) return allocation;
3034 result->set_foreign_address(address);
3035 return result;
3036}
3037
3038
3039AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3040 if (length < 0 || length > ByteArray::kMaxLength) {
3041 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3042 }
3043 int size = ByteArray::SizeFor(length);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003044 AllocationSpace space = SelectSpace(pretenure);
3045 HeapObject* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003046 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003047 AllocationResult allocation = AllocateRaw(size, space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003048 if (!allocation.To(&result)) return allocation;
3049 }
3050
3051 result->set_map_no_write_barrier(byte_array_map());
3052 ByteArray::cast(result)->set_length(length);
3053 return result;
3054}
3055
3056
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003057AllocationResult Heap::AllocateBytecodeArray(int length,
3058 const byte* const raw_bytecodes,
3059 int frame_size,
3060 int parameter_count,
3061 FixedArray* constant_pool) {
3062 if (length < 0 || length > BytecodeArray::kMaxLength) {
3063 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3064 }
3065 // Bytecode array is pretenured, so the constant pool array should be too.
3066 DCHECK(!InNewSpace(constant_pool));
3067
3068 int size = BytecodeArray::SizeFor(length);
3069 HeapObject* result = nullptr;
3070 {
3071 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3072 if (!allocation.To(&result)) return allocation;
3073 }
3074
3075 result->set_map_no_write_barrier(bytecode_array_map());
3076 BytecodeArray* instance = BytecodeArray::cast(result);
3077 instance->set_length(length);
3078 instance->set_frame_size(frame_size);
3079 instance->set_parameter_count(parameter_count);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003080 instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003081 instance->set_constant_pool(constant_pool);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003082 instance->set_handler_table(empty_fixed_array());
Ben Murdochda12d292016-06-02 14:46:10 +01003083 instance->set_source_position_table(empty_byte_array());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003084 CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
3085
3086 return result;
3087}
3088
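// Overwrites [addr, addr + size) with a filler object (a one- or two-pointer
// filler or a FreeSpace object) so that heap iteration stays valid, and
// optionally clears any recorded slots in that range.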
Ben Murdochda12d292016-06-02 14:46:10 +01003089void Heap::CreateFillerObjectAt(Address addr, int size,
3090 ClearRecordedSlots mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003091 if (size == 0) return;
3092 HeapObject* filler = HeapObject::FromAddress(addr);
3093 if (size == kPointerSize) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003094 filler->set_map_no_write_barrier(
3095 reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003096 } else if (size == 2 * kPointerSize) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003097 filler->set_map_no_write_barrier(
3098 reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003099 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003100 DCHECK_GT(size, 2 * kPointerSize);
3101 filler->set_map_no_write_barrier(
3102 reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
3103 FreeSpace::cast(filler)->nobarrier_set_size(size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003104 }
Ben Murdochda12d292016-06-02 14:46:10 +01003105 if (mode == ClearRecordedSlots::kYes) {
3106 ClearRecordedSlotRange(addr, addr + size);
3107 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003108 // At this point, we may be deserializing the heap from a snapshot, in which
3109 // case none of the maps have been created yet and are still NULL.
3110 DCHECK((filler->map() == NULL && !deserialization_complete_) ||
3111 filler->map()->IsMap());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003112}
3113
3114
3115bool Heap::CanMoveObjectStart(HeapObject* object) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003116 if (!FLAG_move_object_start) return false;
3117
Ben Murdoch097c5b22016-05-18 11:27:45 +01003118 // Sampling heap profiler may have a reference to the object.
3119 if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
3120
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003121 Address address = object->address();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003122
3123 if (lo_space()->Contains(object)) return false;
3124
Ben Murdoch61f157c2016-09-16 13:49:30 +01003125 // We can move the object start if the page was already swept.
3126 return Page::FromAddress(address)->SweepingDone();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003127}
3128
3129
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003130void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
3131 // As long as the inspected object is black and we are currently not iterating
3132 // the heap using HeapIterator, we can update the live byte count. We cannot
3133 // update while using HeapIterator because the iterator is temporarily
3134 // marking the whole object graph, without updating live bytes.
Ben Murdochda12d292016-06-02 14:46:10 +01003135 if (lo_space()->Contains(object)) {
3136 lo_space()->AdjustLiveBytes(by);
3137 } else if (!in_heap_iterator() &&
3138 !mark_compact_collector()->sweeping_in_progress() &&
3139 Marking::IsBlack(Marking::MarkBitFrom(object->address()))) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003140 if (mode == SEQUENTIAL_TO_SWEEPER) {
3141 MemoryChunk::IncrementLiveBytesFromGC(object, by);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003142 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003143 MemoryChunk::IncrementLiveBytesFromMutator(object, by);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003144 }
3145 }
3146}
3147
3148
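// Left-trimming removes the first |elements_to_trim| elements in place.
// Illustrative sketch of the memory layout (not to scale):
//
//   before: | map | length=N   | e0 | e1 | ... | eN-1 |
//   after:  | filler ......... | map | length=N-k | ek | ... | eN-1 |
//
// A filler covers the trimmed prefix and a fresh header is written just in
// front of the surviving elements, so the payload itself is never copied.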
3149FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3150 int elements_to_trim) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01003151 CHECK_NOT_NULL(object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003152 DCHECK(!object->IsFixedTypedArrayBase());
3153 DCHECK(!object->IsByteArray());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003154 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3155 const int bytes_to_trim = elements_to_trim * element_size;
3156 Map* map = object->map();
3157
3158 // For now this trick is only applied to objects in new and paged space.
3159 // In large object space the object's start must coincide with the chunk
3160 // start and thus the trick is just not applicable.
3161 DCHECK(!lo_space()->Contains(object));
3162 DCHECK(object->map() != fixed_cow_array_map());
3163
3164 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3165 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3166 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3167
3168 const int len = object->length();
3169 DCHECK(elements_to_trim <= len);
3170
3171 // Calculate location of new array start.
3172 Address new_start = object->address() + bytes_to_trim;
3173
3174 // Technically in new space this write might be omitted (except for
3175 // debug mode, which iterates through the heap), but to play it safe
3176 // we still do it.
Ben Murdochda12d292016-06-02 14:46:10 +01003177 CreateFillerObjectAt(object->address(), bytes_to_trim,
3178 ClearRecordedSlots::kYes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003179 // Initialize header of the trimmed array. Since left trimming is only
3180 // performed on pages which are not concurrently swept, creating a filler
3181 // object does not require synchronization.
3182 DCHECK(CanMoveObjectStart(object));
3183 Object** former_start = HeapObject::RawField(object, 0);
3184 int new_start_index = elements_to_trim * (element_size / kPointerSize);
3185 former_start[new_start_index] = map;
3186 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3187 FixedArrayBase* new_object =
3188 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3189
Ben Murdochc5610432016-08-08 18:44:38 +01003190 // Remove recorded slots for the new map and length offset.
3191 ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
3192 ClearRecordedSlot(new_object, HeapObject::RawField(
3193 new_object, FixedArrayBase::kLengthOffset));
3194
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003195 // Maintain consistency of live bytes during incremental marking
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003196 Marking::TransferMark(this, object->address(), new_start);
3197 AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003198
3199 // Notify the heap profiler of change in object layout.
3200 OnMoveEvent(new_object, object, new_object->Size());
3201 return new_object;
3202}
3203
3204
3205// Force instantiation of templatized method.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003206template void Heap::RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
3207 FixedArrayBase*, int);
3208template void Heap::RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
3209 FixedArrayBase*, int);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003210
3211
3212template<Heap::InvocationMode mode>
3213void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003214 const int len = object->length();
3215 DCHECK_LE(elements_to_trim, len);
3216 DCHECK_GE(elements_to_trim, 0);
3217
3218 int bytes_to_trim;
3219 if (object->IsFixedTypedArrayBase()) {
3220 InstanceType type = object->map()->instance_type();
3221 bytes_to_trim =
3222 FixedTypedArrayBase::TypedArraySize(type, len) -
3223 FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
3224 } else if (object->IsByteArray()) {
3225 int new_size = ByteArray::SizeFor(len - elements_to_trim);
3226 bytes_to_trim = ByteArray::SizeFor(len) - new_size;
3227 DCHECK_GE(bytes_to_trim, 0);
3228 } else {
3229 const int element_size =
3230 object->IsFixedArray() ? kPointerSize : kDoubleSize;
3231 bytes_to_trim = elements_to_trim * element_size;
3232 }
3233
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003234
3235 // For now this trick is only applied to objects in new and paged space.
3236 DCHECK(object->map() != fixed_cow_array_map());
3237
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003238 if (bytes_to_trim == 0) {
3239 // No need to create filler and update live bytes counters, just initialize
3240 // header of the trimmed array.
3241 object->synchronized_set_length(len - elements_to_trim);
3242 return;
3243 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003244
3245 // Calculate location of new array end.
Ben Murdoch097c5b22016-05-18 11:27:45 +01003246 Address old_end = object->address() + object->Size();
3247 Address new_end = old_end - bytes_to_trim;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003248
3249 // Technically in new space this write might be omitted (except for
3250 // debug mode, which iterates through the heap), but to play it safe
3251 // we still do it.
3252 // We do not create a filler for objects in large object space.
3253 // TODO(hpayer): We should shrink the large object page if the size
3254 // of the object changed significantly.
3255 if (!lo_space()->Contains(object)) {
Ben Murdochda12d292016-06-02 14:46:10 +01003256 CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003257 }
3258
3259 // Initialize header of the trimmed array. We are storing the new length
3260 // using release store after creating a filler for the left-over space to
3261 // avoid races with the sweeper thread.
3262 object->synchronized_set_length(len - elements_to_trim);
3263
3264 // Maintain consistency of live bytes during incremental marking
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003265 AdjustLiveBytes(object, -bytes_to_trim, mode);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003266
3267 // Notify the heap profiler of change in object layout. The array may not be
3268 // moved during GC, and size has to be adjusted nevertheless.
3269 HeapProfiler* profiler = isolate()->heap_profiler();
3270 if (profiler->is_tracking_allocations()) {
3271 profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3272 }
3273}
3274
3275
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003276AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
3277 int length, ExternalArrayType array_type, void* external_pointer,
3278 PretenureFlag pretenure) {
3279 int size = FixedTypedArrayBase::kHeaderSize;
3280 AllocationSpace space = SelectSpace(pretenure);
3281 HeapObject* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003282 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003283 AllocationResult allocation = AllocateRaw(size, space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003284 if (!allocation.To(&result)) return allocation;
3285 }
3286
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003287 result->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
3288 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
3289 elements->set_base_pointer(Smi::FromInt(0), SKIP_WRITE_BARRIER);
3290 elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
3291 elements->set_length(length);
3292 return elements;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003293}
3294
3295static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3296 ElementsKind* element_kind) {
3297 switch (array_type) {
3298#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3299 case kExternal##Type##Array: \
3300 *element_size = size; \
3301 *element_kind = TYPE##_ELEMENTS; \
3302 return;
3303
3304 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3305#undef TYPED_ARRAY_CASE
3306
3307 default:
3308 *element_size = 0; // Bogus
3309 *element_kind = UINT8_ELEMENTS; // Bogus
3310 UNREACHABLE();
3311 }
3312}
3313
3314
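// Allocates an on-heap fixed typed array. The base pointer refers back to
// the array itself while the external pointer holds just the data offset,
// so accessors can compute the element data address uniformly for on-heap
// and external (off-heap) backing stores.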
3315AllocationResult Heap::AllocateFixedTypedArray(int length,
3316 ExternalArrayType array_type,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003317 bool initialize,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003318 PretenureFlag pretenure) {
3319 int element_size;
3320 ElementsKind elements_kind;
3321 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3322 int size = OBJECT_POINTER_ALIGN(length * element_size +
3323 FixedTypedArrayBase::kDataOffset);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003324 AllocationSpace space = SelectSpace(pretenure);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003325
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003326 HeapObject* object = nullptr;
3327 AllocationResult allocation = AllocateRaw(
3328 size, space,
3329 array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003330 if (!allocation.To(&object)) return allocation;
3331
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003332 object->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003333 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003334 elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
3335 elements->set_external_pointer(
3336 ExternalReference::fixed_typed_array_base_data_offset().address(),
3337 SKIP_WRITE_BARRIER);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003338 elements->set_length(length);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003339 if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003340 return elements;
3341}
3342
3343
3344AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3345 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003346 AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003347
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003348 HeapObject* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003349 if (!allocation.To(&result)) return allocation;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003350 if (immovable) {
3351 Address address = result->address();
3352 // Code objects which should stay at a fixed address are allocated either
3353 // in the first page of code space (objects on the first page of each space
3354 // are never moved) or in large object space.
3355 if (!code_space_->FirstPage()->Contains(address) &&
3356 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3357 // Discard the first code allocation, which was on a page where it could
3358 // be moved.
Ben Murdochda12d292016-06-02 14:46:10 +01003359 CreateFillerObjectAt(result->address(), object_size,
3360 ClearRecordedSlots::kNo);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003361 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3362 if (!allocation.To(&result)) return allocation;
3363 OnAllocationEvent(result, object_size);
3364 }
3365 }
3366
3367 result->set_map_no_write_barrier(code_map());
3368 Code* code = Code::cast(result);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003369 DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
Ben Murdoch61f157c2016-09-16 13:49:30 +01003370 DCHECK(!memory_allocator()->code_range()->valid() ||
Ben Murdochc5610432016-08-08 18:44:38 +01003371 memory_allocator()->code_range()->contains(code->address()) ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003372 object_size <= code_space()->AreaSize());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003373 code->set_gc_metadata(Smi::FromInt(0));
3374 code->set_ic_age(global_ic_age_);
3375 return code;
3376}
3377
3378
3379AllocationResult Heap::CopyCode(Code* code) {
3380 AllocationResult allocation;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003381
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003382 HeapObject* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003383 // Allocate an object the same size as the code object.
3384 int obj_size = code->Size();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003385 allocation = AllocateRaw(obj_size, CODE_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003386 if (!allocation.To(&result)) return allocation;
3387
3388 // Copy code object.
3389 Address old_addr = code->address();
3390 Address new_addr = result->address();
3391 CopyBlock(new_addr, old_addr, obj_size);
3392 Code* new_code = Code::cast(result);
3393
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003394 // Relocate the copy.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003395 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
Ben Murdoch61f157c2016-09-16 13:49:30 +01003396 DCHECK(!memory_allocator()->code_range()->valid() ||
Ben Murdochc5610432016-08-08 18:44:38 +01003397 memory_allocator()->code_range()->contains(code->address()) ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003398 obj_size <= code_space()->AreaSize());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003399 new_code->Relocate(new_addr - old_addr);
Ben Murdochda12d292016-06-02 14:46:10 +01003400 // We have to iterate over the object and process its pointers when black
3401 // allocation is on.
3402 incremental_marking()->IterateBlackObject(new_code);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003403 return new_code;
3404}
3405
Ben Murdoch097c5b22016-05-18 11:27:45 +01003406AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
3407 int size = BytecodeArray::SizeFor(bytecode_array->length());
3408 HeapObject* result = nullptr;
3409 {
3410 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3411 if (!allocation.To(&result)) return allocation;
3412 }
3413
3414 result->set_map_no_write_barrier(bytecode_array_map());
3415 BytecodeArray* copy = BytecodeArray::cast(result);
3416 copy->set_length(bytecode_array->length());
3417 copy->set_frame_size(bytecode_array->frame_size());
3418 copy->set_parameter_count(bytecode_array->parameter_count());
3419 copy->set_constant_pool(bytecode_array->constant_pool());
3420 copy->set_handler_table(bytecode_array->handler_table());
3421 copy->set_source_position_table(bytecode_array->source_position_table());
Ben Murdochda12d292016-06-02 14:46:10 +01003422 copy->set_interrupt_budget(bytecode_array->interrupt_budget());
Ben Murdoch097c5b22016-05-18 11:27:45 +01003423 bytecode_array->CopyBytecodesTo(copy);
3424 return copy;
3425}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003426
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003427void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3428 AllocationSite* allocation_site) {
3429 memento->set_map_no_write_barrier(allocation_memento_map());
3430 DCHECK(allocation_site->map() == allocation_site_map());
3431 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3432 if (FLAG_allocation_site_pretenuring) {
3433 allocation_site->IncrementMementoCreateCount();
3434 }
3435}
3436
3437
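// Allocates an uninitialized object with the given map in the given space.
// If |allocation_site| is provided, an AllocationMemento is placed right
// behind the object so the site can collect pretenuring feedback.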
3438AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3439 AllocationSite* allocation_site) {
3440 DCHECK(gc_state_ == NOT_IN_GC);
3441 DCHECK(map->instance_type() != MAP_TYPE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003442 int size = map->instance_size();
3443 if (allocation_site != NULL) {
3444 size += AllocationMemento::kSize;
3445 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003446 HeapObject* result = nullptr;
3447 AllocationResult allocation = AllocateRaw(size, space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003448 if (!allocation.To(&result)) return allocation;
3449 // No need for write barrier since object is white and map is in old space.
3450 result->set_map_no_write_barrier(map);
3451 if (allocation_site != NULL) {
3452 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3453 reinterpret_cast<Address>(result) + map->instance_size());
3454 InitializeAllocationMemento(alloc_memento, allocation_site);
3455 }
3456 return result;
3457}
3458
3459
3460void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3461 Map* map) {
3462 obj->set_properties(properties);
3463 obj->initialize_elements();
3464 // TODO(1240798): Initialize the object's body using valid initial values
3465 // according to the object's initial map. For example, if the map's
3466 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3467 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3468 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3469 // verification code has to cope with (temporarily) invalid objects. See,
3470 // for example, JSArray::JSArrayVerify().
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003471 InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
3472}
3473
3474
3475void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
3476 if (start_offset == map->instance_size()) return;
3477 DCHECK_LT(start_offset, map->instance_size());
3478
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003479 // We cannot always fill with one_pointer_filler_map because objects
3480 // created from API functions expect their internal fields to be initialized
3481 // with undefined_value.
3482 // Pre-allocated fields need to be initialized with undefined_value as well
3483 // so that object accesses before the constructor completes (e.g. in the
3484 // debugger) will not cause a crash.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003485
3486 // In case of Array subclassing the |map| could already be transitioned
3487 // to different elements kind from the initial map on which we track slack.
Ben Murdoch097c5b22016-05-18 11:27:45 +01003488 bool in_progress = map->IsInobjectSlackTrackingInProgress();
3489 Object* filler;
3490 if (in_progress) {
3491 filler = one_pointer_filler_map();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003492 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01003493 filler = undefined_value();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003494 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003495 obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003496 if (in_progress) {
3497 map->FindRootMap()->InobjectSlackTrackingStep();
3498 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003499}
3500
3501
3502AllocationResult Heap::AllocateJSObjectFromMap(
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003503 Map* map, PretenureFlag pretenure, AllocationSite* allocation_site) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003504 // JSFunctions should be allocated using AllocateFunction to be
3505 // properly initialized.
3506 DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3507
3508 // Both types of global objects should be allocated using
3509 // AllocateGlobalObject to be properly initialized.
3510 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003511
3512 // Allocate the backing storage for the properties.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003513 FixedArray* properties = empty_fixed_array();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003514
3515 // Allocate the JSObject.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003516 AllocationSpace space = SelectSpace(pretenure);
3517 JSObject* js_obj = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003518 AllocationResult allocation = Allocate(map, space, allocation_site);
3519 if (!allocation.To(&js_obj)) return allocation;
3520
3521 // Initialize the JSObject.
3522 InitializeJSObjectFromMap(js_obj, properties, map);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003523 DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
Ben Murdoch61f157c2016-09-16 13:49:30 +01003524 js_obj->HasFastStringWrapperElements() ||
3525 js_obj->HasFastArgumentsElements());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003526 return js_obj;
3527}
3528
3529
3530AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3531 PretenureFlag pretenure,
3532 AllocationSite* allocation_site) {
3533 DCHECK(constructor->has_initial_map());
3534
3535 // Allocate the object based on the constructors initial map.
3536 AllocationResult allocation = AllocateJSObjectFromMap(
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003537 constructor->initial_map(), pretenure, allocation_site);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003538#ifdef DEBUG
3539 // Make sure result is NOT a global object if valid.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003540 HeapObject* obj = nullptr;
3541 DCHECK(!allocation.To(&obj) || !obj->IsJSGlobalObject());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003542#endif
3543 return allocation;
3544}
3545
3546
3547AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003548 // Make the clone.
3549 Map* map = source->map();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003550
Ben Murdoch61f157c2016-09-16 13:49:30 +01003551 // We can only clone regexps, normal objects, api objects, errors or arrays.
3552 // Copying anything else will break invariants.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003553 CHECK(map->instance_type() == JS_REGEXP_TYPE ||
3554 map->instance_type() == JS_OBJECT_TYPE ||
Ben Murdoch61f157c2016-09-16 13:49:30 +01003555 map->instance_type() == JS_ERROR_TYPE ||
Ben Murdochda12d292016-06-02 14:46:10 +01003556 map->instance_type() == JS_ARRAY_TYPE ||
Ben Murdochc5610432016-08-08 18:44:38 +01003557 map->instance_type() == JS_API_OBJECT_TYPE ||
Ben Murdochda12d292016-06-02 14:46:10 +01003558 map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003559
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003560 int object_size = map->instance_size();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003561 HeapObject* clone = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003562
3563 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3564
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003565 int adjusted_object_size =
3566 site != NULL ? object_size + AllocationMemento::kSize : object_size;
3567 AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
3568 if (!allocation.To(&clone)) return allocation;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003569
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003570 SLOW_DCHECK(InNewSpace(clone));
3571 // Since we know the clone is allocated in new space, we can copy
3572 // the contents without worrying about updating the write barrier.
3573 CopyBlock(clone->address(), source->address(), object_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003574
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003575 if (site != NULL) {
3576 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3577 reinterpret_cast<Address>(clone) + object_size);
3578 InitializeAllocationMemento(alloc_memento, site);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003579 }
3580
3581 SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3582 source->GetElementsKind());
3583 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3584 FixedArray* properties = FixedArray::cast(source->properties());
3585 // Update elements if necessary.
3586 if (elements->length() > 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003587 FixedArrayBase* elem = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003588 {
3589 AllocationResult allocation;
3590 if (elements->map() == fixed_cow_array_map()) {
3591 allocation = FixedArray::cast(elements);
3592 } else if (source->HasFastDoubleElements()) {
3593 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3594 } else {
3595 allocation = CopyFixedArray(FixedArray::cast(elements));
3596 }
3597 if (!allocation.To(&elem)) return allocation;
3598 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003599 JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003600 }
3601 // Update properties if necessary.
3602 if (properties->length() > 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003603 FixedArray* prop = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003604 {
3605 AllocationResult allocation = CopyFixedArray(properties);
3606 if (!allocation.To(&prop)) return allocation;
3607 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003608 JSObject::cast(clone)->set_properties(prop, SKIP_WRITE_BARRIER);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003609 }
3610 // Return the new clone.
3611 return clone;
3612}
3613
3614
3615static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3616 int len) {
3617 // Only works for one byte strings.
3618 DCHECK(vector.length() == len);
3619 MemCopy(chars, vector.start(), len);
3620}
3621
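// Decodes the UTF-8 input and writes UTF-16 code units, emitting a surrogate
// pair for each character outside the BMP.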
3622static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3623 int len) {
3624 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003625 size_t stream_length = vector.length();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003626 while (stream_length != 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003627 size_t consumed = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003628 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3629 DCHECK(c != unibrow::Utf8::kBadChar);
3630 DCHECK(consumed <= stream_length);
3631 stream_length -= consumed;
3632 stream += consumed;
3633 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3634 len -= 2;
3635 if (len < 0) break;
3636 *chars++ = unibrow::Utf16::LeadSurrogate(c);
3637 *chars++ = unibrow::Utf16::TrailSurrogate(c);
3638 } else {
3639 len -= 1;
3640 if (len < 0) break;
3641 *chars++ = c;
3642 }
3643 }
3644 DCHECK(stream_length == 0);
3645 DCHECK(len == 0);
3646}
3647
3648
3649static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3650 DCHECK(s->length() == len);
3651 String::WriteToFlat(s, chars, 0, len);
3652}
3653
3654
3655static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3656 DCHECK(s->length() == len);
3657 String::WriteToFlat(s, chars, 0, len);
3658}
3659
3660
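// Allocates a sequential internalized string of |chars| characters in old
// space and fills it from |t|, which is either an existing String or a UTF-8
// Vector<const char>, depending on the instantiation.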
3661template <bool is_one_byte, typename T>
3662AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3663 uint32_t hash_field) {
3664 DCHECK(chars >= 0);
3665 // Compute map and object size.
3666 int size;
3667 Map* map;
3668
3669 DCHECK_LE(0, chars);
3670 DCHECK_GE(String::kMaxLength, chars);
3671 if (is_one_byte) {
3672 map = one_byte_internalized_string_map();
3673 size = SeqOneByteString::SizeFor(chars);
3674 } else {
3675 map = internalized_string_map();
3676 size = SeqTwoByteString::SizeFor(chars);
3677 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003678
3679 // Allocate string.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003680 HeapObject* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003681 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003682 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003683 if (!allocation.To(&result)) return allocation;
3684 }
3685
3686 result->set_map_no_write_barrier(map);
3687 // Set length and hash fields of the allocated string.
3688 String* answer = String::cast(result);
3689 answer->set_length(chars);
3690 answer->set_hash_field(hash_field);
3691
3692 DCHECK_EQ(size, answer->Size());
3693
3694 if (is_one_byte) {
3695 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3696 } else {
3697 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3698 }
3699 return answer;
3700}
3701
3702
3703// Need explicit instantiations.
3704template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3705 int,
3706 uint32_t);
3707template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3708 int,
3709 uint32_t);
3710template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3711 Vector<const char>, int, uint32_t);
3712
3713
3714AllocationResult Heap::AllocateRawOneByteString(int length,
3715 PretenureFlag pretenure) {
3716 DCHECK_LE(0, length);
3717 DCHECK_GE(String::kMaxLength, length);
3718 int size = SeqOneByteString::SizeFor(length);
3719 DCHECK(size <= SeqOneByteString::kMaxSize);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003720 AllocationSpace space = SelectSpace(pretenure);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003721
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003722 HeapObject* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003723 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003724 AllocationResult allocation = AllocateRaw(size, space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003725 if (!allocation.To(&result)) return allocation;
3726 }
3727
3728 // Partially initialize the object.
3729 result->set_map_no_write_barrier(one_byte_string_map());
3730 String::cast(result)->set_length(length);
3731 String::cast(result)->set_hash_field(String::kEmptyHashField);
3732 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3733
3734 return result;
3735}
3736
3737
3738AllocationResult Heap::AllocateRawTwoByteString(int length,
3739 PretenureFlag pretenure) {
3740 DCHECK_LE(0, length);
3741 DCHECK_GE(String::kMaxLength, length);
3742 int size = SeqTwoByteString::SizeFor(length);
3743 DCHECK(size <= SeqTwoByteString::kMaxSize);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003744 AllocationSpace space = SelectSpace(pretenure);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003745
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003746 HeapObject* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003747 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003748 AllocationResult allocation = AllocateRaw(size, space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003749 if (!allocation.To(&result)) return allocation;
3750 }
3751
3752 // Partially initialize the object.
3753 result->set_map_no_write_barrier(string_map());
3754 String::cast(result)->set_length(length);
3755 String::cast(result)->set_hash_field(String::kEmptyHashField);
3756 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3757 return result;
3758}
3759
3760
3761AllocationResult Heap::AllocateEmptyFixedArray() {
3762 int size = FixedArray::SizeFor(0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003763 HeapObject* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003764 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003765 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003766 if (!allocation.To(&result)) return allocation;
3767 }
3768 // Initialize the object.
3769 result->set_map_no_write_barrier(fixed_array_map());
3770 FixedArray::cast(result)->set_length(0);
3771 return result;
3772}
3773
3774
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003775AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3776 if (!InNewSpace(src)) {
3777 return src;
3778 }
3779
3780 int len = src->length();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003781 HeapObject* obj = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003782 {
3783 AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3784 if (!allocation.To(&obj)) return allocation;
3785 }
3786 obj->set_map_no_write_barrier(fixed_array_map());
3787 FixedArray* result = FixedArray::cast(obj);
3788 result->set_length(len);
3789
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003790 // Copy the content.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003791 DisallowHeapAllocation no_gc;
3792 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3793 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3794
3795 // TODO(mvstanton): The map is set twice because of protection against calling
3796 // set() on a COW FixedArray. Issue v8:3221 was created to track this; once it
3797 // is resolved we might be able to remove this whole method.
3798 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
3799 return result;
3800}
3801
3802
3803AllocationResult Heap::AllocateEmptyFixedTypedArray(
3804 ExternalArrayType array_type) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003805 return AllocateFixedTypedArray(0, array_type, false, TENURED);
3806}
3807
3808
3809AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
3810 PretenureFlag pretenure) {
3811 int old_len = src->length();
3812 int new_len = old_len + grow_by;
3813 DCHECK(new_len >= old_len);
3814 HeapObject* obj = nullptr;
3815 {
3816 AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
3817 if (!allocation.To(&obj)) return allocation;
3818 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003819
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003820 obj->set_map_no_write_barrier(fixed_array_map());
3821 FixedArray* result = FixedArray::cast(obj);
3822 result->set_length(new_len);
3823
3824 // Copy the content.
3825 DisallowHeapAllocation no_gc;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003826 WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003827 for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
3828 MemsetPointer(result->data_start() + old_len, undefined_value(), grow_by);
3829 return result;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003830}
3831
Ben Murdoch097c5b22016-05-18 11:27:45 +01003832AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
3833 PretenureFlag pretenure) {
3834 if (new_len == 0) return empty_fixed_array();
3835
3836 DCHECK_LE(new_len, src->length());
3837
3838 HeapObject* obj = nullptr;
3839 {
3840 AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
3841 if (!allocation.To(&obj)) return allocation;
3842 }
3843 obj->set_map_no_write_barrier(fixed_array_map());
3844
3845 FixedArray* result = FixedArray::cast(obj);
3846 result->set_length(new_len);
3847
3848 // Copy the content.
3849 DisallowHeapAllocation no_gc;
3850 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3851 for (int i = 0; i < new_len; i++) result->set(i, src->get(i), mode);
3852 return result;
3853}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003854
3855AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3856 int len = src->length();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003857 HeapObject* obj = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003858 {
3859 AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
3860 if (!allocation.To(&obj)) return allocation;
3861 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003862 obj->set_map_no_write_barrier(map);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003863 if (InNewSpace(obj)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003864 CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
3865 FixedArray::SizeFor(len) - kPointerSize);
3866 return obj;
3867 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003868 FixedArray* result = FixedArray::cast(obj);
3869 result->set_length(len);
3870
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003871 // Copy the content.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003872 DisallowHeapAllocation no_gc;
3873 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3874 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3875 return result;
3876}
3877
3878
3879AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
3880 Map* map) {
3881 int len = src->length();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003882 HeapObject* obj = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003883 {
3884 AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
3885 if (!allocation.To(&obj)) return allocation;
3886 }
3887 obj->set_map_no_write_barrier(map);
3888 CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
3889 src->address() + FixedDoubleArray::kLengthOffset,
3890 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
3891 return obj;
3892}
3893
3894
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003895AllocationResult Heap::AllocateRawFixedArray(int length,
3896 PretenureFlag pretenure) {
3897 if (length < 0 || length > FixedArray::kMaxLength) {
3898 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3899 }
3900 int size = FixedArray::SizeFor(length);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003901 AllocationSpace space = SelectSpace(pretenure);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003902
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003903 return AllocateRaw(size, space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003904}
3905
3906
3907AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
3908 PretenureFlag pretenure,
3909 Object* filler) {
3910 DCHECK(length >= 0);
3911 DCHECK(empty_fixed_array()->IsFixedArray());
3912 if (length == 0) return empty_fixed_array();
3913
3914 DCHECK(!InNewSpace(filler));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003915 HeapObject* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003916 {
3917 AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
3918 if (!allocation.To(&result)) return allocation;
3919 }
3920
3921 result->set_map_no_write_barrier(fixed_array_map());
3922 FixedArray* array = FixedArray::cast(result);
3923 array->set_length(length);
3924 MemsetPointer(array->data_start(), filler, length);
3925 return array;
3926}
3927
3928
3929AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
3930 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
3931}
3932
3933
3934AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
3935 if (length == 0) return empty_fixed_array();
3936
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003937 HeapObject* obj = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003938 {
3939 AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
3940 if (!allocation.To(&obj)) return allocation;
3941 }
3942
3943 obj->set_map_no_write_barrier(fixed_array_map());
3944 FixedArray::cast(obj)->set_length(length);
3945 return obj;
3946}
3947
3948
3949AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
3950 int length, PretenureFlag pretenure) {
3951 if (length == 0) return empty_fixed_array();
3952
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003953 HeapObject* elements = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003954 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
3955 if (!allocation.To(&elements)) return allocation;
3956
3957 elements->set_map_no_write_barrier(fixed_double_array_map());
3958 FixedDoubleArray::cast(elements)->set_length(length);
3959 return elements;
3960}
3961
3962
3963AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
3964 PretenureFlag pretenure) {
3965 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
Ben Murdochc5610432016-08-08 18:44:38 +01003966 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003967 }
3968 int size = FixedDoubleArray::SizeFor(length);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003969 AllocationSpace space = SelectSpace(pretenure);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003970
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003971 HeapObject* object = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003972 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003973 AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003974 if (!allocation.To(&object)) return allocation;
3975 }
3976
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003977 return object;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003978}
3979
3980
3981AllocationResult Heap::AllocateSymbol() {
3982 // Statically ensure that it is safe to allocate symbols in paged spaces.
3983 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
3984
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003985 HeapObject* result = nullptr;
3986 AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003987 if (!allocation.To(&result)) return allocation;
3988
3989 result->set_map_no_write_barrier(symbol_map());
3990
3991 // Generate a random hash value.
3992 int hash;
3993 int attempts = 0;
3994 do {
3995 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
3996 attempts++;
3997 } while (hash == 0 && attempts < 30);
3998 if (hash == 0) hash = 1; // never return 0
3999
4000 Symbol::cast(result)
4001 ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4002 Symbol::cast(result)->set_name(undefined_value());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004003 Symbol::cast(result)->set_flags(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004004
4005 DCHECK(!Symbol::cast(result)->is_private());
4006 return result;
4007}
4008
4009
4010AllocationResult Heap::AllocateStruct(InstanceType type) {
4011 Map* map;
4012 switch (type) {
4013#define MAKE_CASE(NAME, Name, name) \
4014 case NAME##_TYPE: \
4015 map = name##_map(); \
4016 break;
4017 STRUCT_LIST(MAKE_CASE)
4018#undef MAKE_CASE
4019 default:
4020 UNREACHABLE();
4021 return exception();
4022 }
4023 int size = map->instance_size();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004024 Struct* result = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004025 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004026 AllocationResult allocation = Allocate(map, OLD_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004027 if (!allocation.To(&result)) return allocation;
4028 }
4029 result->InitializeBody(size);
4030 return result;
4031}
4032
4033
4034bool Heap::IsHeapIterable() {
4035 // TODO(hpayer): This function is not correct. Allocation folding in old
4036 // space breaks the iterability.
4037 return new_space_top_after_last_gc_ == new_space()->top();
4038}
4039
4040
4041void Heap::MakeHeapIterable() {
4042 DCHECK(AllowHeapAllocation::IsAllowed());
4043 if (!IsHeapIterable()) {
4044 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4045 }
4046 if (mark_compact_collector()->sweeping_in_progress()) {
4047 mark_compact_collector()->EnsureSweepingCompleted();
4048 }
4049 DCHECK(IsHeapIterable());
4050}
4051
4052
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004053static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
4054 const double kMinMutatorUtilization = 0.0;
4055 const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
4056 if (mutator_speed == 0) return kMinMutatorUtilization;
4057 if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
4058 // Derivation:
4059 // mutator_utilization = mutator_time / (mutator_time + gc_time)
4060 // mutator_time = 1 / mutator_speed
4061 // gc_time = 1 / gc_speed
4062 // mutator_utilization = (1 / mutator_speed) /
4063 // (1 / mutator_speed + 1 / gc_speed)
4064 // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
4065 return gc_speed / (mutator_speed + gc_speed);
4066}
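
// Worked example of the formula above: with mutator_speed == 1000 bytes/ms
// and gc_speed == 4000 bytes/ms, mutator_utilization ==
// 4000 / (1000 + 4000) == 0.8, i.e. the mutator gets roughly 80% of the time.
// If gc_speed is unknown (0), the conservative estimate of 200000 bytes/ms is
// used instead.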
4067
4068
4069double Heap::YoungGenerationMutatorUtilization() {
4070 double mutator_speed = static_cast<double>(
4071 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
Ben Murdochda12d292016-06-02 14:46:10 +01004072 double gc_speed =
4073 tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004074 double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
4075 if (FLAG_trace_mutator_utilization) {
4076 PrintIsolate(isolate(),
4077 "Young generation mutator utilization = %.3f ("
4078 "mutator_speed=%.f, gc_speed=%.f)\n",
4079 result, mutator_speed, gc_speed);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004080 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004081 return result;
4082}
4083
4084
4085double Heap::OldGenerationMutatorUtilization() {
4086 double mutator_speed = static_cast<double>(
4087 tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
4088 double gc_speed = static_cast<double>(
4089 tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
4090 double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
4091 if (FLAG_trace_mutator_utilization) {
4092 PrintIsolate(isolate(),
4093 "Old generation mutator utilization = %.3f ("
4094 "mutator_speed=%.f, gc_speed=%.f)\n",
4095 result, mutator_speed, gc_speed);
4096 }
4097 return result;
4098}
4099
4100
4101bool Heap::HasLowYoungGenerationAllocationRate() {
4102 const double high_mutator_utilization = 0.993;
4103 return YoungGenerationMutatorUtilization() > high_mutator_utilization;
4104}
4105
4106
4107bool Heap::HasLowOldGenerationAllocationRate() {
4108 const double high_mutator_utilization = 0.993;
4109 return OldGenerationMutatorUtilization() > high_mutator_utilization;
4110}
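
// With the 0.993 threshold used above, gc_speed / (mutator_speed + gc_speed)
// > 0.993 is equivalent to gc_speed > ~142 * mutator_speed, i.e. the
// allocation rate is considered low only when collection is roughly two
// orders of magnitude faster than allocation.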
4111
4112
4113bool Heap::HasLowAllocationRate() {
4114 return HasLowYoungGenerationAllocationRate() &&
4115 HasLowOldGenerationAllocationRate();
4116}
4117
4118
4119bool Heap::HasHighFragmentation() {
4120 intptr_t used = PromotedSpaceSizeOfObjects();
4121 intptr_t committed = CommittedOldGenerationMemory();
4122 return HasHighFragmentation(used, committed);
4123}
4124
4125
4126bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
4127 const intptr_t kSlack = 16 * MB;
4128 // Fragmentation is high if committed > 2 * used + kSlack.
4129 // Rewrite the expression to avoid overflow.
4130 return committed - used > used + kSlack;
4131}
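
// Worked example of the check above: with used == 100 MB and
// committed == 230 MB, committed - used == 130 MB > used + kSlack == 116 MB,
// so fragmentation is considered high (committed exceeds 2 * used + 16 MB).
// With committed == 210 MB the check fails, since 110 MB <= 116 MB.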
4132
Ben Murdoch097c5b22016-05-18 11:27:45 +01004133void Heap::SetOptimizeForMemoryUsage() {
4134 // Activate memory reducer when switching to background if
4135 // - there has been no mark-compact since the start, and
4136 // - the committed memory can potentially be reduced.
4137 // 2 pages each for the old, code, and map spaces + 1 page for new space.
4138 const int kMinCommittedMemory = 7 * Page::kPageSize;
4139 if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory) {
4140 MemoryReducer::Event event;
4141 event.type = MemoryReducer::kPossibleGarbage;
4142 event.time_ms = MonotonicallyIncreasingTimeInMs();
4143 memory_reducer_->NotifyPossibleGarbage(event);
4144 }
4145 optimize_for_memory_usage_ = true;
4146}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004147
4148void Heap::ReduceNewSpaceSize() {
4149 // TODO(ulan): Unify this constant with the similar constant in
4150 // GCIdleTimeHandler once the change is merged to 4.5.
4151 static const size_t kLowAllocationThroughput = 1000;
Ben Murdochda12d292016-06-02 14:46:10 +01004152 const double allocation_throughput =
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004153 tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
4154
4155 if (FLAG_predictable) return;
4156
4157 if (ShouldReduceMemory() ||
4158 ((allocation_throughput != 0) &&
4159 (allocation_throughput < kLowAllocationThroughput))) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004160 new_space_.Shrink();
4161 UncommitFromSpace();
4162 }
4163}
4164
4165
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004166void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
4167 if (incremental_marking()->IsMarking() &&
4168 (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
4169 (!incremental_marking()->finalize_marking_completed() &&
4170 mark_compact_collector()->marking_deque()->IsEmpty()))) {
4171 FinalizeIncrementalMarking(comment);
4172 } else if (incremental_marking()->IsComplete() ||
4173 (mark_compact_collector()->marking_deque()->IsEmpty())) {
4174 CollectAllGarbage(current_gc_flags_, comment);
4175 }
4176}
4177
4178
4179bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
4180 size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
Ben Murdochda12d292016-06-02 14:46:10 +01004181 double final_incremental_mark_compact_speed_in_bytes_per_ms =
4182 tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004183 if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
4184 (!incremental_marking()->finalize_marking_completed() &&
4185 mark_compact_collector()->marking_deque()->IsEmpty() &&
4186 gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
Ben Murdochda12d292016-06-02 14:46:10 +01004187 idle_time_in_ms))) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004188 FinalizeIncrementalMarking(
4189 "Idle notification: finalize incremental marking");
4190 return true;
4191 } else if (incremental_marking()->IsComplete() ||
4192 (mark_compact_collector()->marking_deque()->IsEmpty() &&
4193 gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
Ben Murdochda12d292016-06-02 14:46:10 +01004194 idle_time_in_ms, size_of_objects,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004195 final_incremental_mark_compact_speed_in_bytes_per_ms))) {
4196 CollectAllGarbage(current_gc_flags_,
4197 "idle notification: finalize incremental marking");
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004198 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004199 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004200 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004201}
4202
Ben Murdochda12d292016-06-02 14:46:10 +01004203void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
4204 // TODO(hpayer): We do not have to iterate reservations on black objects
4205 // for marking. We just have to execute the special visiting side effect
4206 // code that adds objects to global data structures, e.g. for array buffers.
4207
4208 // Code space, map space, and large object space do not use black pages.
4209 // Hence we have to color all objects of the reservation black first to avoid
4210 // unnecessary marking deque load.
4211 if (incremental_marking()->black_allocation()) {
4212 for (int i = CODE_SPACE; i < Serializer::kNumberOfSpaces; i++) {
4213 const Heap::Reservation& res = reservations[i];
4214 for (auto& chunk : res) {
4215 Address addr = chunk.start;
4216 while (addr < chunk.end) {
4217 HeapObject* obj = HeapObject::FromAddress(addr);
4218 Marking::MarkBlack(Marking::MarkBitFrom(obj));
4219 MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
4220 addr += obj->Size();
4221 }
4222 }
4223 }
4224 for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
4225 const Heap::Reservation& res = reservations[i];
4226 for (auto& chunk : res) {
4227 Address addr = chunk.start;
4228 while (addr < chunk.end) {
4229 HeapObject* obj = HeapObject::FromAddress(addr);
4230 incremental_marking()->IterateBlackObject(obj);
4231 addr += obj->Size();
4232 }
4233 }
4234 }
4235 }
4236}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004237
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004238GCIdleTimeHeapState Heap::ComputeHeapState() {
4239 GCIdleTimeHeapState heap_state;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004240 heap_state.contexts_disposed = contexts_disposed_;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004241 heap_state.contexts_disposal_rate =
4242 tracer()->ContextDisposalRateInMilliseconds();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004243 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4244 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004245 return heap_state;
4246}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004247
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004248
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004249bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
4250 GCIdleTimeHeapState heap_state,
4251 double deadline_in_ms) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004252 bool result = false;
4253 switch (action.type) {
4254 case DONE:
4255 result = true;
4256 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004257 case DO_INCREMENTAL_STEP: {
4258 if (incremental_marking()->incremental_marking_job()->IdleTaskPending()) {
4259 result = true;
4260 } else {
4261 incremental_marking()
4262 ->incremental_marking_job()
4263 ->NotifyIdleTaskProgress();
4264 result = IncrementalMarkingJob::IdleTask::Step(this, deadline_in_ms) ==
4265 IncrementalMarkingJob::IdleTask::kDone;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004266 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004267 break;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004268 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004269 case DO_FULL_GC: {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004270 DCHECK(contexts_disposed_ > 0);
4271 HistogramTimerScope scope(isolate_->counters()->gc_context());
Ben Murdoch097c5b22016-05-18 11:27:45 +01004272 TRACE_EVENT0("v8", "V8.GCContext");
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004273 CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004274 break;
4275 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004276 case DO_NOTHING:
4277 break;
4278 }
4279
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004280 return result;
4281}
4282
4283
4284void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
4285 GCIdleTimeHeapState heap_state,
4286 double start_ms, double deadline_in_ms) {
4287 double idle_time_in_ms = deadline_in_ms - start_ms;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004288 double current_time = MonotonicallyIncreasingTimeInMs();
4289 last_idle_notification_time_ = current_time;
4290 double deadline_difference = deadline_in_ms - current_time;
4291
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004292 contexts_disposed_ = 0;
4293
4294 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4295 static_cast<int>(idle_time_in_ms));
4296
4297 if (deadline_in_ms - start_ms >
4298 GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
4299 int committed_memory = static_cast<int>(CommittedMemory() / KB);
4300 int used_memory = static_cast<int>(heap_state.size_of_objects / KB);
4301 isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
4302 start_ms, committed_memory);
4303 isolate()->counters()->aggregated_memory_heap_used()->AddSample(
4304 start_ms, used_memory);
4305 }
4306
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004307 if (deadline_difference >= 0) {
4308 if (action.type != DONE && action.type != DO_NOTHING) {
4309 isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4310 static_cast<int>(deadline_difference));
4311 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004312 } else {
4313 isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004314 static_cast<int>(-deadline_difference));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004315 }
4316
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004317 if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
4318 FLAG_trace_idle_notification_verbose) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004319 PrintIsolate(isolate_, "%8.0f ms: ", isolate()->time_millis_since_init());
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004320 PrintF(
4321 "Idle notification: requested idle time %.2f ms, used idle time %.2f "
4322 "ms, deadline usage %.2f ms [",
4323 idle_time_in_ms, idle_time_in_ms - deadline_difference,
4324 deadline_difference);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004325 action.Print();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004326 PrintF("]");
4327 if (FLAG_trace_idle_notification_verbose) {
4328 PrintF("[");
4329 heap_state.Print();
4330 PrintF("]");
4331 }
4332 PrintF("\n");
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004333 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004334}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004335
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004336
4337double Heap::MonotonicallyIncreasingTimeInMs() {
4338 return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
4339 static_cast<double>(base::Time::kMillisecondsPerSecond);
4340}
4341
4342
4343bool Heap::IdleNotification(int idle_time_in_ms) {
4344 return IdleNotification(
4345 V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
4346 (static_cast<double>(idle_time_in_ms) /
4347 static_cast<double>(base::Time::kMillisecondsPerSecond)));
4348}
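
// Example: IdleNotification(16) converts the 16 ms budget to seconds
// (16 / 1000.0 == 0.016) and adds it to the current monotonic time, so the
// overload below receives an absolute deadline rather than a duration.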
4349
4350
4351bool Heap::IdleNotification(double deadline_in_seconds) {
4352 CHECK(HasBeenSetUp());
4353 double deadline_in_ms =
4354 deadline_in_seconds *
4355 static_cast<double>(base::Time::kMillisecondsPerSecond);
4356 HistogramTimerScope idle_notification_scope(
4357 isolate_->counters()->gc_idle_notification());
Ben Murdoch097c5b22016-05-18 11:27:45 +01004358 TRACE_EVENT0("v8", "V8.GCIdleNotification");
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004359 double start_ms = MonotonicallyIncreasingTimeInMs();
4360 double idle_time_in_ms = deadline_in_ms - start_ms;
4361
4362 tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
4363 OldGenerationAllocationCounter());
4364
4365 GCIdleTimeHeapState heap_state = ComputeHeapState();
4366
4367 GCIdleTimeAction action =
4368 gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
4369
4370 bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
4371
4372 IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004373 return result;
4374}
4375
4376
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004377bool Heap::RecentIdleNotificationHappened() {
4378 return (last_idle_notification_time_ +
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004379 GCIdleTimeHandler::kMaxScheduledIdleTime) >
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004380 MonotonicallyIncreasingTimeInMs();
4381}
4382
Ben Murdochda12d292016-06-02 14:46:10 +01004383class MemoryPressureInterruptTask : public CancelableTask {
4384 public:
4385 explicit MemoryPressureInterruptTask(Heap* heap)
4386 : CancelableTask(heap->isolate()), heap_(heap) {}
4387
4388 virtual ~MemoryPressureInterruptTask() {}
4389
4390 private:
4391 // v8::internal::CancelableTask overrides.
4392 void RunInternal() override { heap_->CheckMemoryPressure(); }
4393
4394 Heap* heap_;
4395 DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
4396};
4397
4398void Heap::CheckMemoryPressure() {
4399 if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
4400 CollectGarbageOnMemoryPressure("memory pressure");
4401 } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
4402 if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
4403 StartIdleIncrementalMarking();
4404 }
4405 }
4406 MemoryReducer::Event event;
4407 event.type = MemoryReducer::kPossibleGarbage;
4408 event.time_ms = MonotonicallyIncreasingTimeInMs();
4409 memory_reducer_->NotifyPossibleGarbage(event);
4410}
4411
4412void Heap::CollectGarbageOnMemoryPressure(const char* source) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01004413 const int kGarbageThresholdInBytes = 8 * MB;
4414 const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
4415 // This constant is the maximum response time in the RAIL performance model.
4416 const double kMaxMemoryPressurePauseMs = 100;
4417
4418 double start = MonotonicallyIncreasingTimeInMs();
Ben Murdochda12d292016-06-02 14:46:10 +01004419 CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
Ben Murdoch61f157c2016-09-16 13:49:30 +01004420 source, kGCCallbackFlagCollectAllAvailableGarbage);
4421 double end = MonotonicallyIncreasingTimeInMs();
4422
4423 // Estimate how much memory we can free.
4424 int64_t potential_garbage =
4425 (CommittedMemory() - SizeOfObjects()) + external_memory_;
4426 // If we can potentially free a large amount of memory, then start a GC right
4427 // away instead of waiting for the memory reducer.
4428 if (potential_garbage >= kGarbageThresholdInBytes &&
4429 potential_garbage >=
4430 CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
4431 // If we spent less than half of the time budget, then perform full GC
4432 // Otherwise, start incremental marking.
4433 if (end - start < kMaxMemoryPressurePauseMs / 2) {
4434 CollectAllGarbage(
4435 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask, source,
4436 kGCCallbackFlagCollectAllAvailableGarbage);
4437 } else {
4438 if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
4439 StartIdleIncrementalMarking();
4440 }
4441 }
4442 }
Ben Murdochda12d292016-06-02 14:46:10 +01004443}
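
// Worked example of the heuristic above: with CommittedMemory() == 200 MB,
// SizeOfObjects() == 120 MB and external_memory_ == 0, potential_garbage is
// 80 MB, which exceeds both kGarbageThresholdInBytes (8 MB) and 10% of
// committed memory (20 MB); a second full GC is then started if the first
// one used less than half of the 100 ms budget, otherwise incremental
// marking is started (if it is not already running).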
4444
4445void Heap::MemoryPressureNotification(MemoryPressureLevel level,
4446 bool is_isolate_locked) {
4447 MemoryPressureLevel previous = memory_pressure_level_.Value();
4448 memory_pressure_level_.SetValue(level);
4449 if ((previous != MemoryPressureLevel::kCritical &&
4450 level == MemoryPressureLevel::kCritical) ||
4451 (previous == MemoryPressureLevel::kNone &&
4452 level == MemoryPressureLevel::kModerate)) {
4453 if (is_isolate_locked) {
4454 CheckMemoryPressure();
4455 } else {
4456 ExecutionAccess access(isolate());
4457 isolate()->stack_guard()->RequestGC();
4458 V8::GetCurrentPlatform()->CallOnForegroundThread(
4459 reinterpret_cast<v8::Isolate*>(isolate()),
4460 new MemoryPressureInterruptTask(this));
4461 }
4462 }
4463}
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004464
Ben Murdoch61f157c2016-09-16 13:49:30 +01004465void Heap::CollectCodeStatistics() {
4466 PagedSpace::ResetCodeAndMetadataStatistics(isolate());
4467 // We do not look for code in new space or map space. If code
4468 // somehow ends up in those spaces, we would miss it here.
4469 code_space_->CollectCodeStatistics();
4470 old_space_->CollectCodeStatistics();
4471 lo_space_->CollectCodeStatistics();
4472}
4473
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004474#ifdef DEBUG
4475
4476void Heap::Print() {
4477 if (!HasBeenSetUp()) return;
4478 isolate()->PrintStack(stdout);
4479 AllSpaces spaces(this);
4480 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4481 space->Print();
4482 }
4483}
4484
4485
4486void Heap::ReportCodeStatistics(const char* title) {
4487 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
Ben Murdoch61f157c2016-09-16 13:49:30 +01004488 CollectCodeStatistics();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004489 PagedSpace::ReportCodeStatistics(isolate());
4490}
4491
4492
4493// This function expects that NewSpace's allocated objects histogram is
4494// populated (via a call to CollectStatistics or else as a side effect of a
4495// just-completed scavenge collection).
4496void Heap::ReportHeapStatistics(const char* title) {
4497 USE(title);
4498 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4499 gc_count_);
Ben Murdochc5610432016-08-08 18:44:38 +01004500 PrintF("old_generation_allocation_limit_ %" V8PRIdPTR "\n",
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004501 old_generation_allocation_limit_);
4502
4503 PrintF("\n");
4504 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4505 isolate_->global_handles()->PrintStats();
4506 PrintF("\n");
4507
4508 PrintF("Heap statistics : ");
Ben Murdochc5610432016-08-08 18:44:38 +01004509 memory_allocator()->ReportStatistics();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004510 PrintF("To space : ");
4511 new_space_.ReportStatistics();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004512 PrintF("Old space : ");
4513 old_space_->ReportStatistics();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004514 PrintF("Code space : ");
4515 code_space_->ReportStatistics();
4516 PrintF("Map space : ");
4517 map_space_->ReportStatistics();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004518 PrintF("Large object space : ");
4519 lo_space_->ReportStatistics();
4520 PrintF(">>>>>> ========================================= >>>>>>\n");
4521}
4522
4523#endif // DEBUG
4524
Ben Murdoch097c5b22016-05-18 11:27:45 +01004525bool Heap::Contains(HeapObject* value) {
Ben Murdochc5610432016-08-08 18:44:38 +01004526 if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01004527 return false;
4528 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004529 return HasBeenSetUp() &&
Ben Murdoch097c5b22016-05-18 11:27:45 +01004530 (new_space_.ToSpaceContains(value) || old_space_->Contains(value) ||
4531 code_space_->Contains(value) || map_space_->Contains(value) ||
4532 lo_space_->Contains(value));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004533}
4534
Ben Murdoch097c5b22016-05-18 11:27:45 +01004535bool Heap::ContainsSlow(Address addr) {
Ben Murdochc5610432016-08-08 18:44:38 +01004536 if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01004537 return false;
4538 }
4539 return HasBeenSetUp() &&
4540 (new_space_.ToSpaceContainsSlow(addr) ||
4541 old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
4542 map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
4543}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004544
4545bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
Ben Murdochc5610432016-08-08 18:44:38 +01004546 if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01004547 return false;
4548 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004549 if (!HasBeenSetUp()) return false;
4550
4551 switch (space) {
4552 case NEW_SPACE:
Ben Murdoch097c5b22016-05-18 11:27:45 +01004553 return new_space_.ToSpaceContains(value);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004554 case OLD_SPACE:
Ben Murdoch097c5b22016-05-18 11:27:45 +01004555 return old_space_->Contains(value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004556 case CODE_SPACE:
Ben Murdoch097c5b22016-05-18 11:27:45 +01004557 return code_space_->Contains(value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004558 case MAP_SPACE:
Ben Murdoch097c5b22016-05-18 11:27:45 +01004559 return map_space_->Contains(value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004560 case LO_SPACE:
Ben Murdoch097c5b22016-05-18 11:27:45 +01004561 return lo_space_->Contains(value);
4562 }
4563 UNREACHABLE();
4564 return false;
4565}
4566
4567bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
Ben Murdochc5610432016-08-08 18:44:38 +01004568 if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01004569 return false;
4570 }
4571 if (!HasBeenSetUp()) return false;
4572
4573 switch (space) {
4574 case NEW_SPACE:
4575 return new_space_.ToSpaceContainsSlow(addr);
4576 case OLD_SPACE:
4577 return old_space_->ContainsSlow(addr);
4578 case CODE_SPACE:
4579 return code_space_->ContainsSlow(addr);
4580 case MAP_SPACE:
4581 return map_space_->ContainsSlow(addr);
4582 case LO_SPACE:
4583 return lo_space_->ContainsSlow(addr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004584 }
4585 UNREACHABLE();
4586 return false;
4587}
4588
4589
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004590bool Heap::IsValidAllocationSpace(AllocationSpace space) {
4591 switch (space) {
4592 case NEW_SPACE:
4593 case OLD_SPACE:
4594 case CODE_SPACE:
4595 case MAP_SPACE:
4596 case LO_SPACE:
4597 return true;
4598 default:
4599 return false;
4600 }
4601}
4602
4603
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004604bool Heap::RootIsImmortalImmovable(int root_index) {
4605 switch (root_index) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004606#define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
4607 IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
4608#undef IMMORTAL_IMMOVABLE_ROOT
4609#define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
4610 INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
4611#undef INTERNALIZED_STRING
4612#define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
4613 STRING_TYPE_LIST(STRING_TYPE)
4614#undef STRING_TYPE
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004615 return true;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004616 default:
4617 return false;
4618 }
4619}
4620
4621
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004622#ifdef VERIFY_HEAP
4623void Heap::Verify() {
4624 CHECK(HasBeenSetUp());
4625 HandleScope scope(isolate());
4626
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004627 if (mark_compact_collector()->sweeping_in_progress()) {
4628 // We have to wait here for the sweeper threads to have an iterable heap.
4629 mark_compact_collector()->EnsureSweepingCompleted();
4630 }
4631
4632 VerifyPointersVisitor visitor;
4633 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4634
4635 VerifySmisVisitor smis_visitor;
4636 IterateSmiRoots(&smis_visitor);
4637
4638 new_space_.Verify();
4639
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004640 old_space_->Verify(&visitor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004641 map_space_->Verify(&visitor);
4642
4643 VerifyPointersVisitor no_dirty_regions_visitor;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004644 code_space_->Verify(&no_dirty_regions_visitor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004645
4646 lo_space_->Verify();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004647
4648 mark_compact_collector()->VerifyWeakEmbeddedObjectsInCode();
4649 if (FLAG_omit_map_checks_for_leaf_maps) {
4650 mark_compact_collector()->VerifyOmittedMapChecks();
4651 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004652}
4653#endif
4654
4655
4656void Heap::ZapFromSpace() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004657 if (!new_space_.IsFromSpaceCommitted()) return;
Ben Murdoch61f157c2016-09-16 13:49:30 +01004658 for (Page* page : NewSpacePageRange(new_space_.FromSpaceStart(),
4659 new_space_.FromSpaceEnd())) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004660 for (Address cursor = page->area_start(), limit = page->area_end();
4661 cursor < limit; cursor += kPointerSize) {
4662 Memory::Address_at(cursor) = kFromSpaceZapValue;
4663 }
4664 }
4665}
4666
Ben Murdochda12d292016-06-02 14:46:10 +01004667void Heap::IteratePromotedObjectPointers(HeapObject* object, Address start,
4668 Address end, bool record_slots,
4669 ObjectSlotCallback callback) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004670 Address slot_address = start;
Ben Murdoch097c5b22016-05-18 11:27:45 +01004671 Page* page = Page::FromAddress(start);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004672
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004673 while (slot_address < end) {
4674 Object** slot = reinterpret_cast<Object**>(slot_address);
4675 Object* target = *slot;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004676 if (target->IsHeapObject()) {
4677 if (Heap::InFromSpace(target)) {
4678 callback(reinterpret_cast<HeapObject**>(slot),
4679 HeapObject::cast(target));
4680 Object* new_target = *slot;
4681 if (InNewSpace(new_target)) {
4682 SLOW_DCHECK(Heap::InToSpace(new_target));
4683 SLOW_DCHECK(new_target->IsHeapObject());
Ben Murdoch097c5b22016-05-18 11:27:45 +01004684 RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004685 }
4686 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
4687 } else if (record_slots &&
4688 MarkCompactCollector::IsOnEvacuationCandidate(target)) {
4689 mark_compact_collector()->RecordSlot(object, slot, target);
4690 }
4691 }
4692 slot_address += kPointerSize;
4693 }
4694}
4695
Ben Murdochda12d292016-06-02 14:46:10 +01004696class IteratePromotedObjectsVisitor final : public ObjectVisitor {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004697 public:
Ben Murdochda12d292016-06-02 14:46:10 +01004698 IteratePromotedObjectsVisitor(Heap* heap, HeapObject* target,
4699 bool record_slots, ObjectSlotCallback callback)
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004700 : heap_(heap),
4701 target_(target),
4702 record_slots_(record_slots),
4703 callback_(callback) {}
4704
4705 V8_INLINE void VisitPointers(Object** start, Object** end) override {
Ben Murdochda12d292016-06-02 14:46:10 +01004706 heap_->IteratePromotedObjectPointers(
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004707 target_, reinterpret_cast<Address>(start),
4708 reinterpret_cast<Address>(end), record_slots_, callback_);
4709 }
4710
Ben Murdochda12d292016-06-02 14:46:10 +01004711 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
4712 // Black allocation requires us to process objects referenced by
4713 // promoted objects.
4714 if (heap_->incremental_marking()->black_allocation()) {
4715 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
4716 IncrementalMarking::MarkObject(heap_, code);
4717 }
4718 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004719
4720 private:
4721 Heap* heap_;
4722 HeapObject* target_;
4723 bool record_slots_;
4724 ObjectSlotCallback callback_;
4725};
4726
Ben Murdochda12d292016-06-02 14:46:10 +01004727void Heap::IteratePromotedObject(HeapObject* target, int size,
4728 bool was_marked_black,
4729 ObjectSlotCallback callback) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004730 // We are not collecting slots on new space objects during mutation,
4731 // thus we have to scan for pointers to evacuation candidates when we
4732 // promote objects. But we should not record any slots in non-black
4733 // objects. A grey object's slots would be rescanned anyway.
4734 // A white object might not survive until the end of the collection, so
4735 // it would be a violation of the invariant to record its slots.
4736 bool record_slots = false;
4737 if (incremental_marking()->IsCompacting()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004738 MarkBit mark_bit = Marking::MarkBitFrom(target);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004739 record_slots = Marking::IsBlack(mark_bit);
4740 }
4741
Ben Murdochda12d292016-06-02 14:46:10 +01004742 IteratePromotedObjectsVisitor visitor(this, target, record_slots, callback);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004743 target->IterateBody(target->map()->instance_type(), size, &visitor);
Ben Murdochda12d292016-06-02 14:46:10 +01004744
4745 // When black allocation is on, we have to visit new space objects that are
4746 // not already marked black but were promoted to black pages, to keep their
4747 // references alive.
4748 // TODO(hpayer): Implement a special promotion visitor that incorporates
4749 // regular visiting and IteratePromotedObjectPointers.
4750 if (!was_marked_black) {
4751 if (incremental_marking()->black_allocation()) {
4752 IncrementalMarking::MarkObject(this, target->map());
4753 incremental_marking()->IterateBlackObject(target);
4754 }
4755 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004756}
4757
4758
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004759void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4760 IterateStrongRoots(v, mode);
4761 IterateWeakRoots(v, mode);
4762}
4763
4764
4765void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4766 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4767 v->Synchronize(VisitorSynchronization::kStringTable);
4768 if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4769 // Scavenge collections have special processing for this.
4770 external_string_table_.Iterate(v);
4771 }
4772 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4773}
4774
4775
4776void Heap::IterateSmiRoots(ObjectVisitor* v) {
4777 // Acquire execution access since we are going to read stack limit values.
4778 ExecutionAccess access(isolate());
4779 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4780 v->Synchronize(VisitorSynchronization::kSmiRootList);
4781}
4782
4783
4784void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4785 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4786 v->Synchronize(VisitorSynchronization::kStrongRootList);
Ben Murdochda12d292016-06-02 14:46:10 +01004787 // The serializer/deserializer iterates the root list twice, first to pick
4788 // off immortal immovable roots to make sure they end up on the first page,
4789 // and then again for the rest.
4790 if (mode == VISIT_ONLY_STRONG_ROOT_LIST) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004791
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004792 isolate_->bootstrapper()->Iterate(v);
4793 v->Synchronize(VisitorSynchronization::kBootstrapper);
4794 isolate_->Iterate(v);
4795 v->Synchronize(VisitorSynchronization::kTop);
4796 Relocatable::Iterate(isolate_, v);
4797 v->Synchronize(VisitorSynchronization::kRelocatable);
Ben Murdoch61f157c2016-09-16 13:49:30 +01004798 isolate_->debug()->Iterate(v);
4799 v->Synchronize(VisitorSynchronization::kDebug);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004800
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004801 isolate_->compilation_cache()->Iterate(v);
4802 v->Synchronize(VisitorSynchronization::kCompilationCache);
4803
4804 // Iterate over local handles in handle scopes.
4805 isolate_->handle_scope_implementer()->Iterate(v);
4806 isolate_->IterateDeferredHandles(v);
4807 v->Synchronize(VisitorSynchronization::kHandleScope);
4808
4809 // Iterate over the builtin code objects and code stubs in the
4810 // heap. Note that it is not necessary to iterate over code objects
4811 // on scavenge collections.
4812 if (mode != VISIT_ALL_IN_SCAVENGE) {
4813 isolate_->builtins()->IterateBuiltins(v);
Ben Murdoch097c5b22016-05-18 11:27:45 +01004814 v->Synchronize(VisitorSynchronization::kBuiltins);
4815 isolate_->interpreter()->IterateDispatchTable(v);
4816 v->Synchronize(VisitorSynchronization::kDispatchTable);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004817 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004818
4819 // Iterate over global handles.
4820 switch (mode) {
Ben Murdochda12d292016-06-02 14:46:10 +01004821 case VISIT_ONLY_STRONG_ROOT_LIST:
4822 UNREACHABLE();
4823 break;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004824 case VISIT_ONLY_STRONG:
Ben Murdochda12d292016-06-02 14:46:10 +01004825 case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004826 isolate_->global_handles()->IterateStrongRoots(v);
4827 break;
4828 case VISIT_ALL_IN_SCAVENGE:
4829 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4830 break;
4831 case VISIT_ALL_IN_SWEEP_NEWSPACE:
4832 case VISIT_ALL:
4833 isolate_->global_handles()->IterateAllRoots(v);
4834 break;
4835 }
4836 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4837
4838 // Iterate over eternal handles.
4839 if (mode == VISIT_ALL_IN_SCAVENGE) {
4840 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4841 } else {
4842 isolate_->eternal_handles()->IterateAllRoots(v);
4843 }
4844 v->Synchronize(VisitorSynchronization::kEternalHandles);
4845
4846 // Iterate over pointers being held by inactive threads.
4847 isolate_->thread_manager()->Iterate(v);
4848 v->Synchronize(VisitorSynchronization::kThreadManager);
4849
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004850 // Iterate over other strong roots (currently only identity maps).
4851 for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
4852 v->VisitPointers(list->start, list->end);
4853 }
4854 v->Synchronize(VisitorSynchronization::kStrongRoots);
4855
Ben Murdochda12d292016-06-02 14:46:10 +01004856 // Iterate over the partial snapshot cache unless serializing.
4857 if (mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION) {
4858 SerializerDeserializer::Iterate(isolate_, v);
4859 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004860 // We don't do a v->Synchronize call here, because in debug mode that will
4861 // output a flag to the snapshot. However at this point the serializer and
4862 // deserializer are deliberately a little unsynchronized (see above) so the
4863 // checking of the sync flag in the snapshot would fail.
4864}
4865
4866
4867// TODO(1236194): Since the heap size is configurable on the command line
4868// and through the API, we should gracefully handle the case that the heap
4869// size is not big enough to fit all the initial objects.
4870bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
4871 int max_executable_size, size_t code_range_size) {
4872 if (HasBeenSetUp()) return false;
4873
4874 // Overwrite default configuration.
4875 if (max_semi_space_size > 0) {
4876 max_semi_space_size_ = max_semi_space_size * MB;
4877 }
4878 if (max_old_space_size > 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004879 max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004880 }
4881 if (max_executable_size > 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004882 max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004883 }
4884
4885 // If max space size flags are specified overwrite the configuration.
4886 if (FLAG_max_semi_space_size > 0) {
4887 max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4888 }
4889 if (FLAG_max_old_space_size > 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004890 max_old_generation_size_ =
4891 static_cast<intptr_t>(FLAG_max_old_space_size) * MB;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004892 }
4893 if (FLAG_max_executable_size > 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004894 max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
4895 }
4896
4897 if (Page::kPageSize > MB) {
4898 max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize);
4899 max_old_generation_size_ =
4900 ROUND_UP(max_old_generation_size_, Page::kPageSize);
4901 max_executable_size_ = ROUND_UP(max_executable_size_, Page::kPageSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004902 }
4903
4904 if (FLAG_stress_compaction) {
4905 // This will cause more frequent GCs when stressing.
4906 max_semi_space_size_ = Page::kPageSize;
4907 }
4908
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004909 // The new space size must be a power of two to support single-bit testing
4910 // for containment.
4911 max_semi_space_size_ =
4912 base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004913
4914 if (FLAG_min_semi_space_size > 0) {
4915 int initial_semispace_size = FLAG_min_semi_space_size * MB;
4916 if (initial_semispace_size > max_semi_space_size_) {
4917 initial_semispace_size_ = max_semi_space_size_;
4918 if (FLAG_trace_gc) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004919 PrintIsolate(isolate_,
4920 "Min semi-space size cannot be more than the maximum "
4921 "semi-space size of %d MB\n",
4922 max_semi_space_size_ / MB);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004923 }
4924 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004925 initial_semispace_size_ =
4926 ROUND_UP(initial_semispace_size, Page::kPageSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004927 }
4928 }
4929
4930 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4931
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004932 if (FLAG_semi_space_growth_factor < 2) {
4933 FLAG_semi_space_growth_factor = 2;
4934 }
4935
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004936 // The old generation is paged and needs at least one page for each space.
4937 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
4938 max_old_generation_size_ =
4939 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
4940 max_old_generation_size_);
4941
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004942 // The max executable size must be less than or equal to the max old
4943 // generation size.
4944 if (max_executable_size_ > max_old_generation_size_) {
4945 max_executable_size_ = max_old_generation_size_;
4946 }
4947
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004948 if (FLAG_initial_old_space_size > 0) {
4949 initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
4950 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004951 initial_old_generation_size_ =
4952 max_old_generation_size_ / kInitalOldGenerationLimitFactor;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04004953 }
4954 old_generation_allocation_limit_ = initial_old_generation_size_;
4955
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004956 // We rely on being able to allocate new arrays in paged spaces.
4957 DCHECK(Page::kMaxRegularHeapObjectSize >=
4958 (JSArray::kSize +
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004959 FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004960 AllocationMemento::kSize));
4961
4962 code_range_size_ = code_range_size * MB;
4963
4964 configured_ = true;
4965 return true;
4966}
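
// Illustrative sizing example (assuming default flags, i.e. no size-related
// flags set, FLAG_stress_compaction off, and Page::kPageSize <= MB):
// ConfigureHeap(3, 0, 0, 0) first sets max_semi_space_size_ to 3 MB and then
// rounds it up to the next power of two, 4 MB, since the new space size must
// support single-bit containment tests.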
4967
4968
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004969void Heap::AddToRingBuffer(const char* string) {
4970 size_t first_part =
4971 Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
4972 memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
4973 ring_buffer_end_ += first_part;
4974 if (first_part < strlen(string)) {
4975 ring_buffer_full_ = true;
4976 size_t second_part = strlen(string) - first_part;
4977 memcpy(trace_ring_buffer_, string + first_part, second_part);
4978 ring_buffer_end_ = second_part;
4979 }
4980}
4981
4982
4983void Heap::GetFromRingBuffer(char* buffer) {
4984 size_t copied = 0;
4985 if (ring_buffer_full_) {
4986 copied = kTraceRingBufferSize - ring_buffer_end_;
4987 memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
4988 }
4989 memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
4990}
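// Worked example of the wrap-around behaviour above, assuming for the sake of
// illustration that kTraceRingBufferSize == 8 (the real constant may differ):
//
//   AddToRingBuffer("abcde");  // end = 5, buffer = "abcde???"
//   AddToRingBuffer("fghij");  // first_part = 3 -> "abcdefgh", then
//                              // second_part = 2 -> "ijcdefgh", end = 2,
//                              // ring_buffer_full_ = true
//   GetFromRingBuffer(out);    // copies "cdefgh" then "ij" -> "cdefghij",
//                              // i.e. the most recent 8 characters written.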
4991
4992
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004993bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
4994
4995
4996void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4997 *stats->start_marker = HeapStats::kStartMarker;
4998 *stats->end_marker = HeapStats::kEndMarker;
4999 *stats->new_space_size = new_space_.SizeAsInt();
5000 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005001 *stats->old_space_size = old_space_->SizeOfObjects();
5002 *stats->old_space_capacity = old_space_->Capacity();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005003 *stats->code_space_size = code_space_->SizeOfObjects();
5004 *stats->code_space_capacity = code_space_->Capacity();
5005 *stats->map_space_size = map_space_->SizeOfObjects();
5006 *stats->map_space_capacity = map_space_->Capacity();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005007 *stats->lo_space_size = lo_space_->Size();
5008 isolate_->global_handles()->RecordStats(stats);
Ben Murdochc5610432016-08-08 18:44:38 +01005009 *stats->memory_allocator_size = memory_allocator()->Size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005010 *stats->memory_allocator_capacity =
Ben Murdochc5610432016-08-08 18:44:38 +01005011 memory_allocator()->Size() + memory_allocator()->Available();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005012 *stats->os_error = base::OS::GetLastError();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005014 if (take_snapshot) {
5015 HeapIterator iterator(this);
5016 for (HeapObject* obj = iterator.next(); obj != NULL;
5017 obj = iterator.next()) {
5018 InstanceType type = obj->map()->instance_type();
5019 DCHECK(0 <= type && type <= LAST_TYPE);
5020 stats->objects_per_type[type]++;
5021 stats->size_per_type[type] += obj->Size();
5022 }
5023 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005024 if (stats->last_few_messages != NULL)
5025 GetFromRingBuffer(stats->last_few_messages);
5026 if (stats->js_stacktrace != NULL) {
5027 FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
5028 StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
5029 if (gc_state() == Heap::NOT_IN_GC) {
5030 isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
5031 } else {
5032 accumulator.Add("Cannot get stack trace in GC.");
5033 }
5034 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005035}
5036
5037
5038intptr_t Heap::PromotedSpaceSizeOfObjects() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005039 return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5040 map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005041}
5042
5043
5044int64_t Heap::PromotedExternalMemorySize() {
Ben Murdoch61f157c2016-09-16 13:49:30 +01005045 if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
5046 return external_memory_ - external_memory_at_last_mark_compact_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005047}
5048
5049
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005050const double Heap::kMinHeapGrowingFactor = 1.1;
5051const double Heap::kMaxHeapGrowingFactor = 4.0;
5052const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
5053const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
5054const double Heap::kTargetMutatorUtilization = 0.97;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005055
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005056
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005057// Given the GC speed in bytes per ms and the allocation throughput in bytes
5058// per ms (mutator speed), this function returns the heap growing factor that
5059// will achieve the kTargetMutatorUtilization if the GC speed and the mutator
5060// speed remain the same until the next GC.
5061//
5062// For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
5063// TM / (TM + TG), where TM is the time spent in the mutator and TG is the
5064// time spent in the garbage collector.
5065//
5066// Let MU be kTargetMutatorUtilization, the desired mutator utilization for the
5067// time-frame from the end of the current GC to the end of the next GC. Based
5068// on the MU we can compute the heap growing factor F as
5069//
5070// F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
5071//
5072// This formula can be derived as follows.
5073//
5074// F = Limit / Live by definition, where the Limit is the allocation limit,
5075// and the Live is size of live objects.
5076// Let’s assume that we already know the Limit. Then:
5077// TG = Limit / gc_speed
5078// TM = (TM + TG) * MU, by definition of MU.
5079// TM = TG * MU / (1 - MU)
5080// TM = Limit * MU / (gc_speed * (1 - MU))
5081// On the other hand, if the allocation throughput remains constant:
5082// Limit = Live + TM * allocation_throughput = Live + TM * mutator_speed
5083// Solving it for TM, we get
5084// TM = (Limit - Live) / mutator_speed
5085// Combining the two equations for TM:
5086// (Limit - Live) / mutator_speed = Limit * MU / (gc_speed * (1 - MU))
5087// (Limit - Live) = Limit * MU * mutator_speed / (gc_speed * (1 - MU))
5088// substitute R = gc_speed / mutator_speed
5089// (Limit - Live) = Limit * MU / (R * (1 - MU))
5090// substitute F = Limit / Live
5091// F - 1 = F * MU / (R * (1 - MU))
5092// F - F * MU / (R * (1 - MU)) = 1
5093// F * (1 - MU / (R * (1 - MU))) = 1
5094// F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
5095// F = R * (1 - MU) / (R * (1 - MU) - MU)
5096double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
5097 if (gc_speed == 0 || mutator_speed == 0) return kMaxHeapGrowingFactor;
5098
5099 const double speed_ratio = gc_speed / mutator_speed;
5100 const double mu = kTargetMutatorUtilization;
5101
5102 const double a = speed_ratio * (1 - mu);
5103 const double b = speed_ratio * (1 - mu) - mu;
5104
5105 // The factor is a / b, but we need to check for small b first.
5106 double factor =
5107 (a < b * kMaxHeapGrowingFactor) ? a / b : kMaxHeapGrowingFactor;
5108 factor = Min(factor, kMaxHeapGrowingFactor);
5109 factor = Max(factor, kMinHeapGrowingFactor);
5110 return factor;
5111}
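// Worked example for the formula above (illustrative speeds): with
// mu = kTargetMutatorUtilization = 0.97 and speed_ratio R = 100,
//
//   a = 100 * (1 - 0.97) = 3.0
//   b = 3.0 - 0.97       = 2.03
//   F = a / b            = ~1.48
//
// which lies between kMinHeapGrowingFactor (1.1) and kMaxHeapGrowingFactor
// (4.0) and is used as-is. With R = 30, b = 0.9 - 0.97 is negative, so the
// "small b" guard fires and the factor saturates at kMaxHeapGrowingFactor.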
5112
5113
5114intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
5115 intptr_t old_gen_size) {
5116 CHECK(factor > 1.0);
5117 CHECK(old_gen_size > 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005118 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005119 limit = Max(limit, old_gen_size + kMinimumOldGenerationAllocationLimit);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005120 limit += new_space_.Capacity();
5121 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5122 return Min(limit, halfway_to_the_max);
5123}
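// Example of the clamping above, with made-up sizes and assuming
// kMinimumOldGenerationAllocationLimit is well below 50 MB: for
// old_gen_size = 100 MB, factor = 1.5, max_old_generation_size_ = 700 MB and
// a 16 MB new-space capacity,
//
//   limit              = 100 MB * 1.5              = 150 MB
//   limit              = Max(150 MB, 100 MB + min) = 150 MB
//   limit             += 16 MB                     = 166 MB
//   halfway_to_the_max = (100 MB + 700 MB) / 2     = 400 MB
//   result             = Min(166 MB, 400 MB)       = 166 MB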
5124
5125
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005126void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
5127 double gc_speed,
5128 double mutator_speed) {
5129 const double kConservativeHeapGrowingFactor = 1.3;
5130
5131 double factor = HeapGrowingFactor(gc_speed, mutator_speed);
5132
5133 if (FLAG_trace_gc_verbose) {
5134 PrintIsolate(isolate_,
5135 "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
5136 "(gc=%.f, mutator=%.f)\n",
5137 factor, kTargetMutatorUtilization, gc_speed / mutator_speed,
5138 gc_speed, mutator_speed);
5139 }
5140
5141 // Cap the old generation growing factor at 2 so the heap grows more
5142 // slowly on memory-constrained devices.
5143 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice ||
5144 FLAG_optimize_for_size) {
5145 factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
5146 }
5147
5148 if (memory_reducer_->ShouldGrowHeapSlowly() || optimize_for_memory_usage_) {
5149 factor = Min(factor, kConservativeHeapGrowingFactor);
5150 }
5151
5152 if (FLAG_stress_compaction || ShouldReduceMemory()) {
5153 factor = kMinHeapGrowingFactor;
5154 }
5155
5156 if (FLAG_heap_growing_percent > 0) {
5157 factor = 1.0 + FLAG_heap_growing_percent / 100.0;
5158 }
5159
5160 old_generation_allocation_limit_ =
5161 CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5162
5163 if (FLAG_trace_gc_verbose) {
Ben Murdochc5610432016-08-08 18:44:38 +01005164 PrintIsolate(isolate_, "Grow: old size: %" V8PRIdPTR
5165 " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005166 old_gen_size / KB, old_generation_allocation_limit_ / KB,
5167 factor);
5168 }
5169}
5170
5171
5172void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
5173 double gc_speed,
5174 double mutator_speed) {
5175 double factor = HeapGrowingFactor(gc_speed, mutator_speed);
5176 intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5177 if (limit < old_generation_allocation_limit_) {
5178 if (FLAG_trace_gc_verbose) {
Ben Murdochc5610432016-08-08 18:44:38 +01005179 PrintIsolate(isolate_,
5180 "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
5181 " KB, "
5182 "new limit: %" V8PRIdPTR " KB (%.1f)\n",
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005183 old_gen_size / KB, old_generation_allocation_limit_ / KB,
5184 limit / KB, factor);
5185 }
5186 old_generation_allocation_limit_ = limit;
5187 }
5188}
5189
5190
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005191void Heap::EnableInlineAllocation() {
5192 if (!inline_allocation_disabled_) return;
5193 inline_allocation_disabled_ = false;
5194
5195 // Update inline allocation limit for new space.
5196 new_space()->UpdateInlineAllocationLimit(0);
5197}
5198
5199
5200void Heap::DisableInlineAllocation() {
5201 if (inline_allocation_disabled_) return;
5202 inline_allocation_disabled_ = true;
5203
5204 // Update inline allocation limit for new space.
5205 new_space()->UpdateInlineAllocationLimit(0);
5206
5207 // Update inline allocation limit for old spaces.
5208 PagedSpaces spaces(this);
5209 for (PagedSpace* space = spaces.next(); space != NULL;
5210 space = spaces.next()) {
5211 space->EmptyAllocationInfo();
5212 }
5213}
5214
5215
5216V8_DECLARE_ONCE(initialize_gc_once);
5217
5218static void InitializeGCOnce() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005219 Scavenger::Initialize();
Ben Murdoch61f157c2016-09-16 13:49:30 +01005220 StaticScavengeVisitor<DEFAULT_PROMOTION>::Initialize();
5221 StaticScavengeVisitor<PROMOTE_MARKED>::Initialize();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005222 MarkCompactCollector::Initialize();
5223}
5224
5225
5226bool Heap::SetUp() {
5227#ifdef DEBUG
5228 allocation_timeout_ = FLAG_gc_interval;
5229#endif
5230
5231 // Initialize heap spaces and initial maps and objects. Whenever something
5232 // goes wrong, just return false. The caller should check the results and
5233 // call Heap::TearDown() to release allocated memory.
5234 //
5235 // If the heap is not yet configured (e.g. through the API), configure it.
5236 // Configuration is based on the flags new-space-size (really the semispace
5237 // size) and old-space-size if they are set, or on the initial values of
5238 // semispace_size_ and old_generation_size_ otherwise.
5239 if (!configured_) {
5240 if (!ConfigureHeapDefault()) return false;
5241 }
5242
5243 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5244
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005245 // Set up memory allocator.
Ben Murdochc5610432016-08-08 18:44:38 +01005246 memory_allocator_ = new MemoryAllocator(isolate_);
5247 if (!memory_allocator_->SetUp(MaxReserved(), MaxExecutableSize(),
5248 code_range_size_))
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005249 return false;
5250
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005251 // Initialize incremental marking.
5252 incremental_marking_ = new IncrementalMarking(this);
5253
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005254 // Set up new space.
Ben Murdochda12d292016-06-02 14:46:10 +01005255 if (!new_space_.SetUp(initial_semispace_size_, max_semi_space_size_)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005256 return false;
5257 }
5258 new_space_top_after_last_gc_ = new_space()->top();
5259
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005260 // Initialize old space.
5261 old_space_ = new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
5262 if (old_space_ == NULL) return false;
5263 if (!old_space_->SetUp()) return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005264
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005265 // Initialize the code space, set its maximum capacity to the old
5266 // generation size. It needs executable memory.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005267 code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005268 if (code_space_ == NULL) return false;
5269 if (!code_space_->SetUp()) return false;
5270
5271 // Initialize map space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005272 map_space_ = new MapSpace(this, MAP_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005273 if (map_space_ == NULL) return false;
5274 if (!map_space_->SetUp()) return false;
5275
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005276 // The large object space may contain code or data. We set the memory
5277 // to be non-executable here for safety, but this means we need to enable it
5278 // explicitly when allocating large code objects.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005279 lo_space_ = new LargeObjectSpace(this, LO_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005280 if (lo_space_ == NULL) return false;
5281 if (!lo_space_->SetUp()) return false;
5282
5283 // Set up the seed that is used to randomize the string hash function.
5284 DCHECK(hash_seed() == 0);
5285 if (FLAG_randomize_hashes) {
5286 if (FLAG_hash_seed == 0) {
5287 int rnd = isolate()->random_number_generator()->NextInt();
5288 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5289 } else {
5290 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5291 }
5292 }
5293
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005294 for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
5295 i++) {
5296 deferred_counters_[i] = 0;
5297 }
5298
5299 tracer_ = new GCTracer(this);
5300
5301 scavenge_collector_ = new Scavenger(this);
5302
5303 mark_compact_collector_ = new MarkCompactCollector(this);
5304
5305 gc_idle_time_handler_ = new GCIdleTimeHandler();
5306
5307 memory_reducer_ = new MemoryReducer(this);
5308
5309 object_stats_ = new ObjectStats(this);
5310 object_stats_->ClearObjectStats(true);
5311
5312 scavenge_job_ = new ScavengeJob();
5313
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005314 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5315 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5316
5317 store_buffer()->SetUp();
5318
5319 mark_compact_collector()->SetUp();
5320
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005321 idle_scavenge_observer_ = new IdleScavengeObserver(
5322 *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
Ben Murdoch097c5b22016-05-18 11:27:45 +01005323 new_space()->AddAllocationObserver(idle_scavenge_observer_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005324
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005325 return true;
5326}
5327
5328
5329bool Heap::CreateHeapObjects() {
5330 // Create initial maps.
5331 if (!CreateInitialMaps()) return false;
5332 CreateApiObjects();
5333
5334 // Create initial objects
5335 CreateInitialObjects();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005336 CHECK_EQ(0u, gc_count_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005337
5338 set_native_contexts_list(undefined_value());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005339 set_allocation_sites_list(undefined_value());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005340
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005341 return true;
5342}
5343
5344
5345void Heap::SetStackLimits() {
5346 DCHECK(isolate_ != NULL);
5347 DCHECK(isolate_ == isolate());
5348 // On 64 bit machines, pointers are generally out of range of Smis. We write
5349 // something that looks like an out of range Smi to the GC.
5350
5351 // Set up the special root array entries containing the stack limits.
5352 // These are actually addresses, but the tag makes the GC ignore it.
5353 roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5354 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5355 roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5356 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5357}
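// Sketch of the tagging above, assuming the usual kSmiTag == 0 and
// kSmiTagMask == 1 encoding: a jslimit of 0x7ffe1a3d is stored as
//
//   (0x7ffe1a3d & ~1) | 0 == 0x7ffe1a3c
//
// an even word that the GC reads as a Smi and therefore never follows as a
// heap pointer, while the stack guard code can still treat it as an address.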
5358
Ben Murdochda12d292016-06-02 14:46:10 +01005359void Heap::ClearStackLimits() {
5360 roots_[kStackLimitRootIndex] = Smi::FromInt(0);
5361 roots_[kRealStackLimitRootIndex] = Smi::FromInt(0);
5362}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005363
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005364void Heap::PrintAlloctionsHash() {
5365 uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
5366 PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
5367}
5368
5369
5370void Heap::NotifyDeserializationComplete() {
5371 deserialization_complete_ = true;
5372#ifdef DEBUG
5373 // All pages right after bootstrapping must be marked as never-evacuate.
5374 PagedSpaces spaces(this);
5375 for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01005376 for (Page* p : *s) {
5377 CHECK(p->NeverEvacuate());
5378 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005379 }
5380#endif // DEBUG
5381}
Emily Bernierd0a1eb72015-03-24 16:35:39 -04005382
Ben Murdochc5610432016-08-08 18:44:38 +01005383void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
5384 mark_compact_collector()->SetEmbedderHeapTracer(tracer);
5385}
5386
5387bool Heap::UsingEmbedderHeapTracer() {
5388 return mark_compact_collector()->UsingEmbedderHeapTracer();
5389}
5390
5391void Heap::TracePossibleWrapper(JSObject* js_object) {
5392 mark_compact_collector()->TracePossibleWrapper(js_object);
5393}
5394
Ben Murdochda12d292016-06-02 14:46:10 +01005395void Heap::RegisterExternallyReferencedObject(Object** object) {
Ben Murdochc5610432016-08-08 18:44:38 +01005396 mark_compact_collector()->RegisterExternallyReferencedObject(object);
Ben Murdochda12d292016-06-02 14:46:10 +01005397}
Emily Bernierd0a1eb72015-03-24 16:35:39 -04005398
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005399void Heap::TearDown() {
5400#ifdef VERIFY_HEAP
5401 if (FLAG_verify_heap) {
5402 Verify();
5403 }
5404#endif
5405
5406 UpdateMaximumCommitted();
5407
5408 if (FLAG_print_cumulative_gc_stat) {
5409 PrintF("\n");
5410 PrintF("gc_count=%d ", gc_count_);
5411 PrintF("mark_sweep_count=%d ", ms_count_);
5412 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5413 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5414 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
Ben Murdochc5610432016-08-08 18:44:38 +01005415 PrintF("max_alive_after_gc=%" V8PRIdPTR " ", get_max_alive_after_gc());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005416 PrintF("total_marking_time=%.1f ", tracer()->cumulative_marking_duration());
5417 PrintF("total_sweeping_time=%.1f ",
5418 tracer()->cumulative_sweeping_duration());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005419 PrintF("\n\n");
5420 }
5421
5422 if (FLAG_print_max_heap_committed) {
5423 PrintF("\n");
Ben Murdochc5610432016-08-08 18:44:38 +01005424 PrintF("maximum_committed_by_heap=%" V8PRIdPTR " ",
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005425 MaximumCommittedMemory());
Ben Murdochc5610432016-08-08 18:44:38 +01005426 PrintF("maximum_committed_by_new_space=%" V8PRIdPTR " ",
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005427 new_space_.MaximumCommittedMemory());
Ben Murdochc5610432016-08-08 18:44:38 +01005428 PrintF("maximum_committed_by_old_space=%" V8PRIdPTR " ",
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005429 old_space_->MaximumCommittedMemory());
Ben Murdochc5610432016-08-08 18:44:38 +01005430 PrintF("maximum_committed_by_code_space=%" V8PRIdPTR " ",
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005431 code_space_->MaximumCommittedMemory());
Ben Murdochc5610432016-08-08 18:44:38 +01005432 PrintF("maximum_committed_by_map_space=%" V8PRIdPTR " ",
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005433 map_space_->MaximumCommittedMemory());
Ben Murdochc5610432016-08-08 18:44:38 +01005434 PrintF("maximum_committed_by_lo_space=%" V8PRIdPTR " ",
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005435 lo_space_->MaximumCommittedMemory());
5436 PrintF("\n\n");
5437 }
5438
5439 if (FLAG_verify_predictable) {
5440 PrintAlloctionsHash();
5441 }
5442
Ben Murdoch097c5b22016-05-18 11:27:45 +01005443 new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005444 delete idle_scavenge_observer_;
5445 idle_scavenge_observer_ = nullptr;
5446
5447 delete scavenge_collector_;
5448 scavenge_collector_ = nullptr;
5449
5450 if (mark_compact_collector_ != nullptr) {
5451 mark_compact_collector_->TearDown();
5452 delete mark_compact_collector_;
5453 mark_compact_collector_ = nullptr;
5454 }
5455
5456 delete incremental_marking_;
5457 incremental_marking_ = nullptr;
5458
5459 delete gc_idle_time_handler_;
5460 gc_idle_time_handler_ = nullptr;
5461
5462 if (memory_reducer_ != nullptr) {
5463 memory_reducer_->TearDown();
5464 delete memory_reducer_;
5465 memory_reducer_ = nullptr;
5466 }
5467
5468 delete object_stats_;
5469 object_stats_ = nullptr;
5470
5471 delete scavenge_job_;
5472 scavenge_job_ = nullptr;
5473
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005474 isolate_->global_handles()->TearDown();
5475
5476 external_string_table_.TearDown();
5477
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005478 delete tracer_;
5479 tracer_ = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005480
5481 new_space_.TearDown();
5482
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005483 if (old_space_ != NULL) {
5484 delete old_space_;
5485 old_space_ = NULL;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005486 }
5487
5488 if (code_space_ != NULL) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005489 delete code_space_;
5490 code_space_ = NULL;
5491 }
5492
5493 if (map_space_ != NULL) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005494 delete map_space_;
5495 map_space_ = NULL;
5496 }
5497
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005498 if (lo_space_ != NULL) {
5499 lo_space_->TearDown();
5500 delete lo_space_;
5501 lo_space_ = NULL;
5502 }
5503
5504 store_buffer()->TearDown();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005505
Ben Murdochc5610432016-08-08 18:44:38 +01005506 memory_allocator()->TearDown();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005507
5508 StrongRootsList* next = NULL;
5509 for (StrongRootsList* list = strong_roots_list_; list; list = next) {
5510 next = list->next;
5511 delete list;
5512 }
5513 strong_roots_list_ = NULL;
Ben Murdochc5610432016-08-08 18:44:38 +01005514
5515 delete memory_allocator_;
5516 memory_allocator_ = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005517}
5518
5519
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005520void Heap::AddGCPrologueCallback(v8::Isolate::GCCallback callback,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005521 GCType gc_type, bool pass_isolate) {
5522 DCHECK(callback != NULL);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005523 GCCallbackPair pair(callback, gc_type, pass_isolate);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005524 DCHECK(!gc_prologue_callbacks_.Contains(pair));
5525 return gc_prologue_callbacks_.Add(pair);
5526}
5527
5528
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005529void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallback callback) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005530 DCHECK(callback != NULL);
5531 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5532 if (gc_prologue_callbacks_[i].callback == callback) {
5533 gc_prologue_callbacks_.Remove(i);
5534 return;
5535 }
5536 }
5537 UNREACHABLE();
5538}
5539
5540
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005541void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005542 GCType gc_type, bool pass_isolate) {
5543 DCHECK(callback != NULL);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005544 GCCallbackPair pair(callback, gc_type, pass_isolate);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005545 DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5546 return gc_epilogue_callbacks_.Add(pair);
5547}
5548
5549
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005550void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005551 DCHECK(callback != NULL);
5552 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5553 if (gc_epilogue_callbacks_[i].callback == callback) {
5554 gc_epilogue_callbacks_.Remove(i);
5555 return;
5556 }
5557 }
5558 UNREACHABLE();
5559}
5560
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005561// TODO(ishell): Find a better place for this.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005562void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005563 Handle<DependentCode> dep) {
5564 DCHECK(!InNewSpace(*obj));
5565 DCHECK(!InNewSpace(*dep));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005566 Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005567 table = WeakHashTable::Put(table, obj, dep);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005568 if (*table != weak_object_to_code_table())
5569 set_weak_object_to_code_table(*table);
5570 DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005571}
5572
5573
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005574DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
5575 Object* dep = weak_object_to_code_table()->Lookup(obj);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005576 if (dep->IsDependentCode()) return DependentCode::cast(dep);
5577 return DependentCode::cast(empty_fixed_array());
5578}
5579
Ben Murdoch61f157c2016-09-16 13:49:30 +01005580namespace {
5581void CompactWeakFixedArray(Object* object) {
5582 if (object->IsWeakFixedArray()) {
5583 WeakFixedArray* array = WeakFixedArray::cast(object);
5584 array->Compact<WeakFixedArray::NullCallback>();
5585 }
5586}
5587} // anonymous namespace
5588
5589void Heap::CompactWeakFixedArrays() {
5590 // Find known WeakFixedArrays and compact them.
5591 HeapIterator iterator(this);
5592 for (HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
5593 if (o->IsPrototypeInfo()) {
5594 Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
5595 if (prototype_users->IsWeakFixedArray()) {
5596 WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
5597 array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
5598 }
5599 } else if (o->IsScript()) {
5600 CompactWeakFixedArray(Script::cast(o)->shared_function_infos());
5601 }
5602 }
5603 CompactWeakFixedArray(noscript_shared_function_infos());
5604 CompactWeakFixedArray(script_list());
5605 CompactWeakFixedArray(weak_stack_trace_list());
5606}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005607
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005608void Heap::AddRetainedMap(Handle<Map> map) {
5609 Handle<WeakCell> cell = Map::WeakCellForMap(map);
5610 Handle<ArrayList> array(retained_maps(), isolate());
5611 if (array->IsFull()) {
5612 CompactRetainedMaps(*array);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005613 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005614 array = ArrayList::Add(
5615 array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
5616 ArrayList::kReloadLengthAfterAllocation);
5617 if (*array != retained_maps()) {
5618 set_retained_maps(*array);
5619 }
5620}
5621
5622
5623void Heap::CompactRetainedMaps(ArrayList* retained_maps) {
5624 DCHECK_EQ(retained_maps, this->retained_maps());
5625 int length = retained_maps->Length();
5626 int new_length = 0;
5627 int new_number_of_disposed_maps = 0;
5628 // This loop compacts the array by removing cleared weak cells.
5629 for (int i = 0; i < length; i += 2) {
5630 DCHECK(retained_maps->Get(i)->IsWeakCell());
5631 WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
5632 Object* age = retained_maps->Get(i + 1);
5633 if (cell->cleared()) continue;
5634 if (i != new_length) {
5635 retained_maps->Set(new_length, cell);
5636 retained_maps->Set(new_length + 1, age);
5637 }
5638 if (i < number_of_disposed_maps_) {
5639 new_number_of_disposed_maps += 2;
5640 }
5641 new_length += 2;
5642 }
5643 number_of_disposed_maps_ = new_number_of_disposed_maps;
5644 Object* undefined = undefined_value();
5645 for (int i = new_length; i < length; i++) {
5646 retained_maps->Clear(i, undefined);
5647 }
5648 if (new_length != length) retained_maps->SetLength(new_length);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005649}
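// Illustrative layout of the retained_maps() list walked above: entries are
// flat (WeakCell, age) pairs, e.g.
//
//   [ cell0, age0, cell1, age1, cell2, age2, ... ]
//
// which is why the loop steps by two. When e.g. cell1 has been cleared by the
// GC, the pair (cell1, age1) is dropped, later pairs are shifted down, the
// freed tail slots are overwritten with undefined, and the length is shrunk.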
5650
Ben Murdochc5610432016-08-08 18:44:38 +01005651void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
5652 v8::internal::V8::FatalProcessOutOfMemory(location, is_heap_oom);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005653}
5654
5655#ifdef DEBUG
5656
5657class PrintHandleVisitor : public ObjectVisitor {
5658 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005659 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005660 for (Object** p = start; p < end; p++)
5661 PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
5662 reinterpret_cast<void*>(*p));
5663 }
5664};
5665
5666
5667void Heap::PrintHandles() {
5668 PrintF("Handles:\n");
5669 PrintHandleVisitor v;
5670 isolate_->handle_scope_implementer()->Iterate(&v);
5671}
5672
5673#endif
5674
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005675class CheckHandleCountVisitor : public ObjectVisitor {
5676 public:
5677 CheckHandleCountVisitor() : handle_count_(0) {}
5678 ~CheckHandleCountVisitor() override {
5679 CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
5680 }
5681 void VisitPointers(Object** start, Object** end) override {
5682 handle_count_ += end - start;
5683 }
5684
5685 private:
5686 ptrdiff_t handle_count_;
5687};
5688
5689
5690void Heap::CheckHandleCount() {
5691 CheckHandleCountVisitor v;
5692 isolate_->handle_scope_implementer()->Iterate(&v);
5693}
5694
Ben Murdoch097c5b22016-05-18 11:27:45 +01005695void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
5696 if (!InNewSpace(object)) {
5697 store_buffer()->MoveEntriesToRememberedSet();
5698 Address slot_addr = reinterpret_cast<Address>(slot);
5699 Page* page = Page::FromAddress(slot_addr);
5700 DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
5701 RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
Ben Murdochda12d292016-06-02 14:46:10 +01005702 RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
Ben Murdoch097c5b22016-05-18 11:27:45 +01005703 }
5704}
5705
Ben Murdochda12d292016-06-02 14:46:10 +01005706void Heap::ClearRecordedSlotRange(Address start, Address end) {
5707 Page* page = Page::FromAddress(start);
5708 if (!page->InNewSpace()) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01005709 store_buffer()->MoveEntriesToRememberedSet();
Ben Murdoch097c5b22016-05-18 11:27:45 +01005710 DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
Ben Murdochda12d292016-06-02 14:46:10 +01005711 RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end);
5712 RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end);
Ben Murdoch097c5b22016-05-18 11:27:45 +01005713 }
5714}
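// Hypothetical caller sketch (not a quote of the actual call sites): when an
// old-space object is shrunk and its tail replaced by a filler, any recorded
// slots inside the removed tail must be dropped first, e.g.
//
//   Address start = object->address() + new_size;
//   Address end = object->address() + old_size;
//   heap->ClearRecordedSlotRange(start, end);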
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005715
5716Space* AllSpaces::next() {
5717 switch (counter_++) {
5718 case NEW_SPACE:
5719 return heap_->new_space();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005720 case OLD_SPACE:
5721 return heap_->old_space();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005722 case CODE_SPACE:
5723 return heap_->code_space();
5724 case MAP_SPACE:
5725 return heap_->map_space();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005726 case LO_SPACE:
5727 return heap_->lo_space();
5728 default:
5729 return NULL;
5730 }
5731}
5732
5733
5734PagedSpace* PagedSpaces::next() {
5735 switch (counter_++) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005736 case OLD_SPACE:
5737 return heap_->old_space();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005738 case CODE_SPACE:
5739 return heap_->code_space();
5740 case MAP_SPACE:
5741 return heap_->map_space();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005742 default:
5743 return NULL;
5744 }
5745}
5746
5747
5748OldSpace* OldSpaces::next() {
5749 switch (counter_++) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005750 case OLD_SPACE:
5751 return heap_->old_space();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005752 case CODE_SPACE:
5753 return heap_->code_space();
5754 default:
5755 return NULL;
5756 }
5757}
5758
5759
5760SpaceIterator::SpaceIterator(Heap* heap)
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005761 : heap_(heap), current_space_(FIRST_SPACE), iterator_(NULL) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005762
5763
5764SpaceIterator::~SpaceIterator() {
5765 // Delete active iterator if any.
5766 delete iterator_;
5767}
5768
5769
5770bool SpaceIterator::has_next() {
5771 // Iterate until no more spaces.
5772 return current_space_ != LAST_SPACE;
5773}
5774
5775
5776ObjectIterator* SpaceIterator::next() {
5777 if (iterator_ != NULL) {
5778 delete iterator_;
5779 iterator_ = NULL;
5780 // Move to the next space
5781 current_space_++;
5782 if (current_space_ > LAST_SPACE) {
5783 return NULL;
5784 }
5785 }
5786
5787 // Return iterator for the new current space.
5788 return CreateIterator();
5789}
5790
5791
5792// Create an iterator for the space to iterate.
5793ObjectIterator* SpaceIterator::CreateIterator() {
5794 DCHECK(iterator_ == NULL);
5795
5796 switch (current_space_) {
5797 case NEW_SPACE:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005798 iterator_ = new SemiSpaceIterator(heap_->new_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005799 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005800 case OLD_SPACE:
5801 iterator_ = new HeapObjectIterator(heap_->old_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005802 break;
5803 case CODE_SPACE:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005804 iterator_ = new HeapObjectIterator(heap_->code_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005805 break;
5806 case MAP_SPACE:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005807 iterator_ = new HeapObjectIterator(heap_->map_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005808 break;
5809 case LO_SPACE:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005810 iterator_ = new LargeObjectIterator(heap_->lo_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005811 break;
5812 }
5813
5814 // Return the newly allocated iterator.
5815 DCHECK(iterator_ != NULL);
5816 return iterator_;
5817}
5818
5819
5820class HeapObjectsFilter {
5821 public:
5822 virtual ~HeapObjectsFilter() {}
5823 virtual bool SkipObject(HeapObject* object) = 0;
5824};
5825
5826
5827class UnreachableObjectsFilter : public HeapObjectsFilter {
5828 public:
5829 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5830 MarkReachableObjects();
5831 }
5832
5833 ~UnreachableObjectsFilter() {
5834 heap_->mark_compact_collector()->ClearMarkbits();
5835 }
5836
5837 bool SkipObject(HeapObject* object) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005838 if (object->IsFiller()) return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005839 MarkBit mark_bit = Marking::MarkBitFrom(object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005840 return Marking::IsWhite(mark_bit);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005841 }
5842
5843 private:
5844 class MarkingVisitor : public ObjectVisitor {
5845 public:
5846 MarkingVisitor() : marking_stack_(10) {}
5847
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005848 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005849 for (Object** p = start; p < end; p++) {
5850 if (!(*p)->IsHeapObject()) continue;
5851 HeapObject* obj = HeapObject::cast(*p);
5852 MarkBit mark_bit = Marking::MarkBitFrom(obj);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005853 if (Marking::IsWhite(mark_bit)) {
5854 Marking::WhiteToBlack(mark_bit);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005855 marking_stack_.Add(obj);
5856 }
5857 }
5858 }
5859
5860 void TransitiveClosure() {
5861 while (!marking_stack_.is_empty()) {
5862 HeapObject* obj = marking_stack_.RemoveLast();
5863 obj->Iterate(this);
5864 }
5865 }
5866
5867 private:
5868 List<HeapObject*> marking_stack_;
5869 };
5870
5871 void MarkReachableObjects() {
5872 MarkingVisitor visitor;
5873 heap_->IterateRoots(&visitor, VISIT_ALL);
5874 visitor.TransitiveClosure();
5875 }
5876
5877 Heap* heap_;
5878 DisallowHeapAllocation no_allocation_;
5879};
5880
5881
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005882HeapIterator::HeapIterator(Heap* heap,
5883 HeapIterator::HeapObjectsFiltering filtering)
5884 : make_heap_iterable_helper_(heap),
5885 no_heap_allocation_(),
5886 heap_(heap),
5887 filtering_(filtering),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005888 filter_(nullptr),
5889 space_iterator_(nullptr),
5890 object_iterator_(nullptr) {
5891 heap_->heap_iterator_start();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005892 // Start the iteration.
5893 space_iterator_ = new SpaceIterator(heap_);
5894 switch (filtering_) {
5895 case kFilterUnreachable:
5896 filter_ = new UnreachableObjectsFilter(heap_);
5897 break;
5898 default:
5899 break;
5900 }
5901 object_iterator_ = space_iterator_->next();
5902}
5903
5904
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005905HeapIterator::~HeapIterator() {
5906 heap_->heap_iterator_end();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005907#ifdef DEBUG
5908 // Assert that in filtering mode we have iterated through all
5909 // objects. Otherwise, the heap will be left in an inconsistent state.
5910 if (filtering_ != kNoFiltering) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005911 DCHECK(object_iterator_ == nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005912 }
5913#endif
5914 // Make sure the last iterator is deallocated.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005915 delete object_iterator_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005916 delete space_iterator_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005917 delete filter_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005918}
5919
5920
5921HeapObject* HeapIterator::next() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005922 if (filter_ == nullptr) return NextObject();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005923
5924 HeapObject* obj = NextObject();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005925 while ((obj != nullptr) && (filter_->SkipObject(obj))) obj = NextObject();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005926 return obj;
5927}
5928
5929
5930HeapObject* HeapIterator::NextObject() {
5931 // No iterator means we are done.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005932 if (object_iterator_ == nullptr) return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005933
Ben Murdoch61f157c2016-09-16 13:49:30 +01005934 if (HeapObject* obj = object_iterator_->Next()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005935 // If the current iterator has more objects we are fine.
5936 return obj;
5937 } else {
5938 // Go through the spaces looking for one that has objects.
5939 while (space_iterator_->has_next()) {
5940 object_iterator_ = space_iterator_->next();
Ben Murdoch61f157c2016-09-16 13:49:30 +01005941 if (HeapObject* obj = object_iterator_->Next()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005942 return obj;
5943 }
5944 }
5945 }
5946 // Done with the last space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005947 object_iterator_ = nullptr;
5948 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005949}
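// Typical usage sketch for the iterator above (RecordStats and
// CompactWeakFixedArrays iterate with the default filtering; passing
// kFilterUnreachable additionally skips objects no longer reachable from the
// roots):
//
//   HeapIterator it(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = it.next(); obj != nullptr; obj = it.next()) {
//     // Only objects marked by UnreachableObjectsFilter's root scan are
//     // returned here.
//   }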
5950
5951
5952#ifdef DEBUG
5953
5954Object* const PathTracer::kAnyGlobalObject = NULL;
5955
5956class PathTracer::MarkVisitor : public ObjectVisitor {
5957 public:
5958 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005959
5960 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005961 // Scan all HeapObject pointers in [start, end)
5962 for (Object** p = start; !tracer_->found() && (p < end); p++) {
5963 if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
5964 }
5965 }
5966
5967 private:
5968 PathTracer* tracer_;
5969};
5970
5971
5972class PathTracer::UnmarkVisitor : public ObjectVisitor {
5973 public:
5974 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005975
5976 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005977 // Scan all HeapObject pointers in [start, end)
5978 for (Object** p = start; p < end; p++) {
5979 if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
5980 }
5981 }
5982
5983 private:
5984 PathTracer* tracer_;
5985};
5986
5987
5988void PathTracer::VisitPointers(Object** start, Object** end) {
5989 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5990 // Visit all HeapObject pointers in [start, end)
5991 for (Object** p = start; !done && (p < end); p++) {
5992 if ((*p)->IsHeapObject()) {
5993 TracePathFrom(p);
5994 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5995 }
5996 }
5997}
5998
5999
6000void PathTracer::Reset() {
6001 found_target_ = false;
6002 object_stack_.Clear();
6003}
6004
6005
6006void PathTracer::TracePathFrom(Object** root) {
6007 DCHECK((search_target_ == kAnyGlobalObject) ||
6008 search_target_->IsHeapObject());
6009 found_target_in_trace_ = false;
6010 Reset();
6011
6012 MarkVisitor mark_visitor(this);
6013 MarkRecursively(root, &mark_visitor);
6014
6015 UnmarkVisitor unmark_visitor(this);
6016 UnmarkRecursively(root, &unmark_visitor);
6017
6018 ProcessResults();
6019}
6020
6021
6022static bool SafeIsNativeContext(HeapObject* obj) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006023 return obj->map() == obj->GetHeap()->root(Heap::kNativeContextMapRootIndex);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006024}
6025
6026
6027void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6028 if (!(*p)->IsHeapObject()) return;
6029
6030 HeapObject* obj = HeapObject::cast(*p);
6031
6032 MapWord map_word = obj->map_word();
6033 if (!map_word.ToMap()->IsHeapObject()) return; // visited before
6034
6035 if (found_target_in_trace_) return; // stop if target found
6036 object_stack_.Add(obj);
6037 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
6038 (obj == search_target_)) {
6039 found_target_in_trace_ = true;
6040 found_target_ = true;
6041 return;
6042 }
6043
6044 bool is_native_context = SafeIsNativeContext(obj);
6045
6046 // not visited yet
6047 Map* map = Map::cast(map_word.ToMap());
6048
6049 MapWord marked_map_word =
6050 MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
6051 obj->set_map_word(marked_map_word);
6052
6053 // Scan the object body.
6054 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6055 // This is specialized to scan Context's properly.
6056 Object** start =
6057 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
6058 Object** end =
6059 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
6060 Context::FIRST_WEAK_SLOT * kPointerSize);
6061 mark_visitor->VisitPointers(start, end);
6062 } else {
6063 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
6064 }
6065
6066 // Scan the map after the body because the body is a lot more interesting
6067 // when doing leak detection.
6068 MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
6069
6070 if (!found_target_in_trace_) { // don't pop if found the target
6071 object_stack_.RemoveLast();
6072 }
6073}
6074
6075
6076void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6077 if (!(*p)->IsHeapObject()) return;
6078
6079 HeapObject* obj = HeapObject::cast(*p);
6080
6081 MapWord map_word = obj->map_word();
6082 if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
6083
6084 MapWord unmarked_map_word =
6085 MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
6086 obj->set_map_word(unmarked_map_word);
6087
6088 Map* map = Map::cast(unmarked_map_word.ToMap());
6089
6090 UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
6091
6092 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
6093}
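// Note on the marking scheme used by MarkRecursively/UnmarkRecursively above:
// instead of mark bits, a visited object temporarily has kMarkTag added to the
// raw value of its map word, chosen so that the marked word no longer looks
// like a heap object, which is exactly what the "visited before" check tests.
// UnmarkRecursively then subtracts the same tag to restore the original map
// pointer, so nothing else may touch the heap while a path trace runs.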
6094
6095
6096void PathTracer::ProcessResults() {
6097 if (found_target_) {
6098 OFStream os(stdout);
6099 os << "=====================================\n"
6100 << "==== Path to object ====\n"
6101 << "=====================================\n\n";
6102
6103 DCHECK(!object_stack_.is_empty());
6104 for (int i = 0; i < object_stack_.length(); i++) {
6105 if (i > 0) os << "\n |\n |\n V\n\n";
6106 object_stack_[i]->Print(os);
6107 }
6108 os << "=====================================\n";
6109 }
6110}
6111
6112
6113// Triggers a depth-first traversal of reachable objects from one
6114// given root object and finds a path to a specific heap object and
6115// prints it.
6116void Heap::TracePathToObjectFrom(Object* target, Object* root) {
6117 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6118 tracer.VisitPointer(&root);
6119}
6120
6121
6122// Triggers a depth-first traversal of reachable objects from roots
6123// and finds a path to a specific heap object and prints it.
6124void Heap::TracePathToObject(Object* target) {
6125 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6126 IterateRoots(&tracer, VISIT_ONLY_STRONG);
6127}
6128
6129
6130// Triggers a depth-first traversal of reachable objects from roots
6131// and finds a path to any global object and prints it. Useful for
6132// determining the source for leaks of global objects.
6133void Heap::TracePathToGlobal() {
6134 PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
6135 VISIT_ALL);
6136 IterateRoots(&tracer, VISIT_ONLY_STRONG);
6137}
6138#endif
6139
6140
6141void Heap::UpdateCumulativeGCStatistics(double duration,
6142 double spent_in_mutator,
6143 double marking_time) {
6144 if (FLAG_print_cumulative_gc_stat) {
6145 total_gc_time_ms_ += duration;
6146 max_gc_pause_ = Max(max_gc_pause_, duration);
6147 max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
6148 min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
6149 } else if (FLAG_trace_gc_verbose) {
6150 total_gc_time_ms_ += duration;
6151 }
6152
6153 marking_time_ += marking_time;
6154}
6155
6156
6157int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
6158 DisallowHeapAllocation no_gc;
6159 // Uses only lower 32 bits if pointers are larger.
6160 uintptr_t addr_hash =
6161 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
6162 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
6163}
6164
6165
6166int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
6167 DisallowHeapAllocation no_gc;
6168 int index = (Hash(map, name) & kHashMask);
6169 for (int i = 0; i < kEntriesPerBucket; i++) {
6170 Key& key = keys_[index + i];
6171 if ((key.map == *map) && key.name->Equals(*name)) {
6172 return field_offsets_[index + i];
6173 }
6174 }
6175 return kNotFound;
6176}
6177
6178
6179void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
6180 int field_offset) {
6181 DisallowHeapAllocation no_gc;
6182 if (!name->IsUniqueName()) {
6183 if (!StringTable::InternalizeStringIfExists(
6184 name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
6185 return;
6186 }
6187 }
6188 // This cache is cleared only between mark compact passes, so we expect the
6189 // cache to only contain old space names.
6190 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
6191
6192 int index = (Hash(map, name) & kHashMask);
6193 // After a GC there will be free slots, so we use them in order (this may
6194 // help to get the most frequently used one in position 0).
6195 for (int i = 0; i < kEntriesPerBucket; i++) {
6196 Key& key = keys_[index + i];
6197 Object* free_entry_indicator = NULL;
6198 if (key.map == free_entry_indicator) {
6199 key.map = *map;
6200 key.name = *name;
6201 field_offsets_[index + i] = field_offset;
6202 return;
6203 }
6204 }
6205 // No free entry found in this bucket, so we move them all down one and
6206 // put the new entry at position zero.
6207 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
6208 Key& key = keys_[index + i];
6209 Key& key2 = keys_[index + i - 1];
6210 key = key2;
6211 field_offsets_[index + i] = field_offsets_[index + i - 1];
6212 }
6213
6214 // Write the new first entry.
6215 Key& key = keys_[index];
6216 key.map = *map;
6217 key.name = *name;
6218 field_offsets_[index] = field_offset;
6219}
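// Illustrative picture of the cache consulted by Lookup() and refreshed by
// Update() above: keys_ is split into buckets of kEntriesPerBucket (map, name)
// entries with a parallel field_offsets_ array, e.g. for a bucket starting at
// `index` (offsets are made-up example values):
//
//   keys_[index + 0] = {map A, "x"}    field_offsets_[index + 0] = 24
//   keys_[index + 1] = {map B, "x"}    field_offsets_[index + 1] = 8
//
// Lookup() hashes (map, name) to a bucket and scans it linearly; Update()
// reuses a free slot in the bucket if one exists and otherwise shifts the
// bucket down by one entry and installs the new key at position zero.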
6220
6221
6222void KeyedLookupCache::Clear() {
6223 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6224}
6225
6226
6227void DescriptorLookupCache::Clear() {
6228 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
6229}
6230
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006231void Heap::ExternalStringTable::CleanUp() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006232 int last = 0;
Ben Murdoch61f157c2016-09-16 13:49:30 +01006233 Isolate* isolate = heap_->isolate();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006234 for (int i = 0; i < new_space_strings_.length(); ++i) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01006235 if (new_space_strings_[i]->IsTheHole(isolate)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006236 continue;
6237 }
6238 DCHECK(new_space_strings_[i]->IsExternalString());
6239 if (heap_->InNewSpace(new_space_strings_[i])) {
6240 new_space_strings_[last++] = new_space_strings_[i];
6241 } else {
6242 old_space_strings_.Add(new_space_strings_[i]);
6243 }
6244 }
6245 new_space_strings_.Rewind(last);
6246 new_space_strings_.Trim();
6247
6248 last = 0;
6249 for (int i = 0; i < old_space_strings_.length(); ++i) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01006250 if (old_space_strings_[i]->IsTheHole(isolate)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006251 continue;
6252 }
6253 DCHECK(old_space_strings_[i]->IsExternalString());
6254 DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
6255 old_space_strings_[last++] = old_space_strings_[i];
6256 }
6257 old_space_strings_.Rewind(last);
6258 old_space_strings_.Trim();
6259#ifdef VERIFY_HEAP
6260 if (FLAG_verify_heap) {
6261 Verify();
6262 }
6263#endif
6264}
6265
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006266void Heap::ExternalStringTable::TearDown() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006267 for (int i = 0; i < new_space_strings_.length(); ++i) {
6268 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6269 }
6270 new_space_strings_.Free();
6271 for (int i = 0; i < old_space_strings_.length(); ++i) {
6272 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6273 }
6274 old_space_strings_.Free();
6275}
6276
6277
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006278void Heap::RememberUnmappedPage(Address page, bool compacted) {
6279 uintptr_t p = reinterpret_cast<uintptr_t>(page);
6280 // Tag the page pointer to make it findable in the dump file.
6281 if (compacted) {
6282 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
6283 } else {
6284 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
6285 }
6286 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6287 reinterpret_cast<Address>(p);
6288 remembered_unmapped_pages_index_++;
6289 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6290}
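// Example of the tagging above, assuming (for illustration) a 1 MB
// Page::kPageSize and a page at 0x3f400000: an unmapped page that was
// compacted is remembered as 0x3f400000 ^ 0xc1ead == 0x3f4c1ead, and one that
// simply died as 0x3f400000 ^ 0x1d1ed == 0x3f41d1ed, so the "c1ead" and
// "1d1ed" patterns stand out when scanning a minidump. The entries live in a
// small circular buffer of kRememberedUnmappedPages slots.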
6291
6292
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006293void Heap::RegisterStrongRoots(Object** start, Object** end) {
6294 StrongRootsList* list = new StrongRootsList();
6295 list->next = strong_roots_list_;
6296 list->start = start;
6297 list->end = end;
6298 strong_roots_list_ = list;
6299}
6300
6301
6302void Heap::UnregisterStrongRoots(Object** start) {
6303 StrongRootsList* prev = NULL;
6304 StrongRootsList* list = strong_roots_list_;
6305 while (list != nullptr) {
6306 StrongRootsList* next = list->next;
6307 if (list->start == start) {
6308 if (prev) {
6309 prev->next = next;
6310 } else {
6311 strong_roots_list_ = next;
6312 }
6313 delete list;
6314 } else {
6315 prev = list;
6316 }
6317 list = next;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006318 }
6319}
6320
6321
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006322size_t Heap::NumberOfTrackedHeapObjectTypes() {
6323 return ObjectStats::OBJECT_STATS_COUNT;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006324}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006325
6326
6327size_t Heap::ObjectCountAtLastGC(size_t index) {
6328 if (index >= ObjectStats::OBJECT_STATS_COUNT) return 0;
6329 return object_stats_->object_count_last_gc(index);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006330}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00006331
6332
6333size_t Heap::ObjectSizeAtLastGC(size_t index) {
6334 if (index >= ObjectStats::OBJECT_STATS_COUNT) return 0;
6335 return object_stats_->object_size_last_gc(index);
6336}
6337
6338
6339bool Heap::GetObjectTypeName(size_t index, const char** object_type,
6340 const char** object_sub_type) {
6341 if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
6342
6343 switch (static_cast<int>(index)) {
6344#define COMPARE_AND_RETURN_NAME(name) \
6345 case name: \
6346 *object_type = #name; \
6347 *object_sub_type = ""; \
6348 return true;
6349 INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
6350#undef COMPARE_AND_RETURN_NAME
6351#define COMPARE_AND_RETURN_NAME(name) \
6352 case ObjectStats::FIRST_CODE_KIND_SUB_TYPE + Code::name: \
6353 *object_type = "CODE_TYPE"; \
6354 *object_sub_type = "CODE_KIND/" #name; \
6355 return true;
6356 CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
6357#undef COMPARE_AND_RETURN_NAME
6358#define COMPARE_AND_RETURN_NAME(name) \
6359 case ObjectStats::FIRST_FIXED_ARRAY_SUB_TYPE + name: \
6360 *object_type = "FIXED_ARRAY_TYPE"; \
6361 *object_sub_type = #name; \
6362 return true;
6363 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
6364#undef COMPARE_AND_RETURN_NAME
6365#define COMPARE_AND_RETURN_NAME(name) \
6366 case ObjectStats::FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - \
6367 Code::kFirstCodeAge: \
6368 *object_type = "CODE_TYPE"; \
6369 *object_sub_type = "CODE_AGE/" #name; \
6370 return true;
6371 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
6372#undef COMPARE_AND_RETURN_NAME
6373 }
6374 return false;
6375}
6376
6377
6378// static
6379int Heap::GetStaticVisitorIdForMap(Map* map) {
6380 return StaticVisitorBase::GetVisitorId(map);
6381}
6382
6383} // namespace internal
6384} // namespace v8