// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_INL_H_
#define V8_HEAP_HEAP_INL_H_

#include <cmath>

#include "src/base/platform/platform.h"
#include "src/counters.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/remembered-set.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
#include "src/isolate.h"
#include "src/list-inl.h"
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/type-feedback-vector-inl.h"

namespace v8 {
namespace internal {

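// Note on the promotion queue: as the code below shows, entries are written
// downwards from rear_ (see the --rear_). If the next entry would cross
// limit_, the queue head is relocated and all further entries are spilled to
// a separately allocated emergency_stack_ instead.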
void PromotionQueue::insert(HeapObject* target, int32_t size,
                            bool was_marked_black) {
  if (emergency_stack_ != NULL) {
    emergency_stack_->Add(Entry(target, size, was_marked_black));
    return;
  }

  if ((rear_ - 1) < limit_) {
    RelocateQueueHead();
    emergency_stack_->Add(Entry(target, size, was_marked_black));
    return;
  }

  struct Entry* entry = reinterpret_cast<struct Entry*>(--rear_);
  entry->obj_ = target;
  entry->size_ = size;
  entry->was_marked_black_ = was_marked_black;

// Assert no overflow into live objects.
#ifdef DEBUG
  SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
                              reinterpret_cast<Address>(rear_));
#endif
}

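// The accessor macros below expand once per entry of the corresponding list
// macro. For a root-list entry such as V(Map, meta_map, MetaMap),
// ROOT_ACCESSOR generates:
//
//   Map* Heap::meta_map() { return Map::cast(roots_[kMetaMapRootIndex]); }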
#define ROOT_ACCESSOR(type, name, camel_name) \
  type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
  Map* Heap::name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) \
  String* Heap::name() { return String::cast(roots_[k##name##RootIndex]); }
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

#define SYMBOL_ACCESSOR(name) \
  Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define SYMBOL_ACCESSOR(name, description) \
  Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define ROOT_ACCESSOR(type, name, camel_name)                                 \
  void Heap::set_##name(type* value) {                                        \
    /* The deserializer makes use of the fact that these common roots are */  \
    /* never in new space and never on a page that is being compacted.    */  \
    DCHECK(!deserialization_complete() ||                                     \
           RootCanBeWrittenAfterInitialization(k##camel_name##RootIndex));    \
    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
    roots_[k##camel_name##RootIndex] = value;                                 \
  }
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR


template <>
bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
  // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
  return chars == str.length();
}


template <>
bool inline Heap::IsOneByte(String* str, int chars) {
  return str->IsOneByteRepresentation();
}


AllocationResult Heap::AllocateInternalizedStringFromUtf8(
    Vector<const char> str, int chars, uint32_t hash_field) {
  if (IsOneByte(str, chars)) {
    return AllocateOneByteInternalizedString(Vector<const uint8_t>::cast(str),
                                             hash_field);
  }
  return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
}


template <typename T>
AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
                                                      uint32_t hash_field) {
  if (IsOneByte(t, chars)) {
    return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
  }
  return AllocateInternalizedStringImpl<false>(t, chars, hash_field);
}


AllocationResult Heap::AllocateOneByteInternalizedString(
    Vector<const uint8_t> str, uint32_t hash_field) {
  CHECK_GE(String::kMaxLength, str.length());
  // Compute map and object size.
  Map* map = one_byte_internalized_string_map();
  int size = SeqOneByteString::SizeFor(str.length());

  // Allocate string.
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  // String maps are all immortal immovable objects.
  result->set_map_no_write_barrier(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(str.length());
  answer->set_hash_field(hash_field);

  DCHECK_EQ(size, answer->Size());

  // Fill in the characters.
  MemCopy(answer->address() + SeqOneByteString::kHeaderSize, str.start(),
          str.length());

  return answer;
}


AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
                                                         uint32_t hash_field) {
  CHECK_GE(String::kMaxLength, str.length());
  // Compute map and object size.
  Map* map = internalized_string_map();
  int size = SeqTwoByteString::SizeFor(str.length());

  // Allocate string.
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(str.length());
  answer->set_hash_field(hash_field);

  DCHECK_EQ(size, answer->Size());

  // Fill in the characters.
  MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, str.start(),
          str.length() * kUC16Size);

  return answer;
}

AllocationResult Heap::CopyFixedArray(FixedArray* src) {
  if (src->length() == 0) return src;
  return CopyFixedArrayWithMap(src, src->map());
}


AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
  if (src->length() == 0) return src;
  return CopyFixedDoubleArrayWithMap(src, src->map());
}

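// AllocateRaw is the central entry point for raw allocation. It returns
// either the freshly allocated object or a retry tag naming the space that
// needs a GC (AllocationResult::Retry); callers such as the internalized
// string helpers above simply propagate a failed result, so that their
// caller can typically collect garbage and retry.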
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
                                   AllocationAlignment alignment) {
  DCHECK(AllowHandleAllocation::IsAllowed());
  DCHECK(AllowHeapAllocation::IsAllowed());
  DCHECK(gc_state_ == NOT_IN_GC);
#ifdef DEBUG
  if (FLAG_gc_interval >= 0 && !always_allocate() &&
      Heap::allocation_timeout_-- <= 0) {
    return AllocationResult::Retry(space);
  }
  isolate_->counters()->objs_since_last_full()->Increment();
  isolate_->counters()->objs_since_last_young()->Increment();
#endif

  bool large_object = size_in_bytes > Page::kMaxRegularHeapObjectSize;
  HeapObject* object = nullptr;
  AllocationResult allocation;
  if (NEW_SPACE == space) {
    if (large_object) {
      space = LO_SPACE;
    } else {
      allocation = new_space_.AllocateRaw(size_in_bytes, alignment);
      if (allocation.To(&object)) {
        OnAllocationEvent(object, size_in_bytes);
      }
      return allocation;
    }
  }

  // Here we only allocate in the old generation.
  if (OLD_SPACE == space) {
    if (large_object) {
      allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
    } else {
      allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
    }
  } else if (CODE_SPACE == space) {
    if (size_in_bytes <= code_space()->AreaSize()) {
      allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
    } else {
      allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
    }
  } else if (LO_SPACE == space) {
    DCHECK(large_object);
    allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
  } else if (MAP_SPACE == space) {
    allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
  } else {
    // NEW_SPACE is not allowed here.
    UNREACHABLE();
  }
  if (allocation.To(&object)) {
    OnAllocationEvent(object, size_in_bytes);
  } else {
    old_gen_exhausted_ = true;
  }

  if (!old_gen_exhausted_ && incremental_marking()->black_allocation() &&
      space != OLD_SPACE) {
    Marking::MarkBlack(Marking::MarkBitFrom(object));
    MemoryChunk::IncrementLiveBytesFromGC(object, size_in_bytes);
  }
  return allocation;
}


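// OnAllocationEvent feeds the heap profiler and, under --verify-predictable,
// mixes every allocation into a running hash (see UpdateAllocationsHash
// below) so that two runs of the VM can be compared allocation by
// allocation.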
void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
  HeapProfiler* profiler = isolate_->heap_profiler();
  if (profiler->is_tracking_allocations()) {
    profiler->AllocationEvent(object->address(), size_in_bytes);
  }

  if (FLAG_verify_predictable) {
    ++allocations_count_;
    // Advance synthetic time by making a time request.
    MonotonicallyIncreasingTimeInMs();

    UpdateAllocationsHash(object);
    UpdateAllocationsHash(size_in_bytes);

    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
      PrintAlloctionsHash();
    }
  }

  if (FLAG_trace_allocation_stack_interval > 0) {
    if (!FLAG_verify_predictable) ++allocations_count_;
    if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
      isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
    }
  }
}


void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
                       int size_in_bytes) {
  HeapProfiler* heap_profiler = isolate_->heap_profiler();
  if (heap_profiler->is_tracking_object_moves()) {
    heap_profiler->ObjectMoveEvent(source->address(), target->address(),
                                   size_in_bytes);
  }
  if (target->IsSharedFunctionInfo()) {
    LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
                                                         target->address()));
  }

  if (FLAG_verify_predictable) {
    ++allocations_count_;
    // Advance synthetic time by making a time request.
    MonotonicallyIncreasingTimeInMs();

    UpdateAllocationsHash(source);
    UpdateAllocationsHash(target);
    UpdateAllocationsHash(size_in_bytes);

    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
      PrintAlloctionsHash();
    }
  }
}


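// The value hashed below encodes an allocation as a single 32-bit word: the
// offset of the object within its page in the low kPageSizeBits bits, and
// the allocation-space tag above them (the STATIC_ASSERT checks that both
// fit into 32 bits).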
void Heap::UpdateAllocationsHash(HeapObject* object) {
  Address object_address = object->address();
  MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
  AllocationSpace allocation_space = memory_chunk->owner()->identity();

  STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
  uint32_t value =
      static_cast<uint32_t>(object_address - memory_chunk->address()) |
      (static_cast<uint32_t>(allocation_space) << kPageSizeBits);

  UpdateAllocationsHash(value);
}


void Heap::UpdateAllocationsHash(uint32_t value) {
  uint16_t c1 = static_cast<uint16_t>(value);
  uint16_t c2 = static_cast<uint16_t>(value >> 16);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}


void Heap::RegisterExternalString(String* string) {
  external_string_table_.AddString(string);
}


void Heap::FinalizeExternalString(String* string) {
  DCHECK(string->IsExternalString());
  v8::String::ExternalStringResourceBase** resource_addr =
      reinterpret_cast<v8::String::ExternalStringResourceBase**>(
          reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
          kHeapObjectTag);

  // Dispose of the C++ object if it has not already been disposed.
  if (*resource_addr != NULL) {
    (*resource_addr)->Dispose();
    *resource_addr = NULL;
  }
}


bool Heap::InNewSpace(Object* object) {
  bool result = new_space_.Contains(object);
  DCHECK(!result ||                 // Either not in new space
         gc_state_ != NOT_IN_GC ||  // ... or in the middle of GC
         InToSpace(object));        // ... or in to-space (where we allocate).
  return result;
}

bool Heap::InFromSpace(Object* object) {
  return new_space_.FromSpaceContains(object);
}


bool Heap::InToSpace(Object* object) {
  return new_space_.ToSpaceContains(object);
}

bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }

bool Heap::InNewSpaceSlow(Address address) {
  return new_space_.ContainsSlow(address);
}

bool Heap::InOldSpaceSlow(Address address) {
  return old_space_->ContainsSlow(address);
}

bool Heap::OldGenerationAllocationLimitReached() {
  if (!incremental_marking()->IsStopped()) return false;
  return OldGenerationSpaceAvailable() < 0;
}

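// A new-space object is promoted if it lies below the age mark, i.e. it
// already survived a previous scavenge. With PROMOTE_MARKED (used while
// incremental marking is running, see CurrentPromotionMode below), objects
// the marker has already reached are promoted as well.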
template <PromotionMode promotion_mode>
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
  Page* page = Page::FromAddress(old_address);
  Address age_mark = new_space_.age_mark();

  if (promotion_mode == PROMOTE_MARKED) {
    MarkBit mark_bit = Marking::MarkBitFrom(old_address);
    if (!Marking::IsWhite(mark_bit)) {
      return true;
    }
  }

  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
         (!page->ContainsLimit(age_mark) || old_address < age_mark);
}

PromotionMode Heap::CurrentPromotionMode() {
  if (incremental_marking()->IsMarking()) {
    return PROMOTE_MARKED;
  } else {
    return DEFAULT_PROMOTION;
  }
}

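// Write-barrier helpers: a store that creates a pointer from an old-space
// object to a new-space object is recorded in the OLD_TO_NEW remembered
// set, so a later scavenge can find such slots without scanning the whole
// old generation.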
void Heap::RecordWrite(Object* object, int offset, Object* o) {
  if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
    return;
  }
  RememberedSet<OLD_TO_NEW>::Insert(
      Page::FromAddress(reinterpret_cast<Address>(object)),
      HeapObject::cast(object)->address() + offset);
}

void Heap::RecordFixedArrayElements(FixedArray* array, int offset,
                                    int length) {
  if (InNewSpace(array)) return;
  Page* page = Page::FromAddress(reinterpret_cast<Address>(array));
  for (int i = 0; i < length; i++) {
    if (!InNewSpace(array->get(offset + i))) continue;
    RememberedSet<OLD_TO_NEW>::Insert(
        page,
        reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
  }
}


bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
  // Object migration is governed by the following rules:
  //
  // 1) Objects in new-space can be migrated to the old space
  //    that matches their target space or they stay in new-space.
  // 2) Objects in old-space stay in the same space when migrating.
  // 3) Fillers (two or more words) can migrate due to left-trimming of
  //    fixed arrays in new-space or old space.
  // 4) Fillers (one word) can never migrate, they are skipped by
  //    incremental marking explicitly to prevent invalid pattern.
  //
  // Since this function is used for debugging only, we do not place
  // asserts here, but check everything explicitly.
  if (obj->map() == one_pointer_filler_map()) return false;
  InstanceType type = obj->map()->instance_type();
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  AllocationSpace src = chunk->owner()->identity();
  switch (src) {
    case NEW_SPACE:
      return dst == src || dst == OLD_SPACE;
    case OLD_SPACE:
      return dst == src &&
             (dst == OLD_SPACE || obj->IsFiller() || obj->IsExternalString());
    case CODE_SPACE:
      return dst == src && type == CODE_TYPE;
    case MAP_SPACE:
    case LO_SPACE:
      return false;
  }
  UNREACHABLE();
  return false;
}

void Heap::CopyBlock(Address dst, Address src, int byte_size) {
  CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
            static_cast<size_t>(byte_size / kPointerSize));
}

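// Left-trimming a FixedArray moves its start forward and leaves filler
// objects behind, so a stale slot may still point at such a filler. A filler
// map word that is not a forwarding address identifies this case; the slot
// is then cleared.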
bool Heap::PurgeLeftTrimmedObject(Object** object) {
  HeapObject* current = reinterpret_cast<HeapObject*>(*object);
  const MapWord map_word = current->map_word();
  if (current->IsFiller() && !map_word.IsForwardingAddress()) {
#ifdef DEBUG
    // We need to find a FixedArrayBase map after walking the fillers.
    while (current->IsFiller()) {
      Address next = reinterpret_cast<Address>(current);
      if (current->map() == one_pointer_filler_map()) {
        next += kPointerSize;
      } else if (current->map() == two_pointer_filler_map()) {
        next += 2 * kPointerSize;
      } else {
        next += current->Size();
      }
      current = reinterpret_cast<HeapObject*>(next);
    }
    DCHECK(current->IsFixedArrayBase());
#endif  // DEBUG
    *object = nullptr;
    return true;
  }
  return false;
}

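// Memento layout, when present, directly behind the object:
//
//   | object ... | AllocationMemento (map word + allocation site) |
//
// kForGC trusts the map check alone; kForRuntime additionally compares
// against the new-space top, since the word behind the most recently
// allocated object is uninitialized memory.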
template <Heap::FindMementoMode mode>
AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
  // Check if there is potentially a memento behind the object. If
  // the last word of the memento is on another page we return
  // immediately.
  Address object_address = object->address();
  Address memento_address = object_address + object->Size();
  Address last_memento_word_address = memento_address + kPointerSize;
  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
    return nullptr;
  }
  HeapObject* candidate = HeapObject::FromAddress(memento_address);
  Map* candidate_map = candidate->map();
  // This fast check may peek at an uninitialized word. However, the slow check
  // below (memento_address == top) ensures that this is safe. Mark the word as
  // initialized to silence MemorySanitizer warnings.
  MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
  if (candidate_map != allocation_memento_map()) {
    return nullptr;
  }
  AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);

  // Depending on what the memento is used for, we might need to perform
  // additional checks.
  Address top;
  switch (mode) {
    case Heap::kForGC:
      return memento_candidate;
    case Heap::kForRuntime:
      if (memento_candidate == nullptr) return nullptr;
      // Either the object is the last object in the new space, or there is
      // another object of at least word size (the header map word) following
      // it, so it suffices to compare ptr and top here.
      top = NewSpaceTop();
      DCHECK(memento_address == top ||
             memento_address + HeapObject::kHeaderSize <= top ||
             !Page::OnSamePage(memento_address, top - 1));
      if ((memento_address != top) && memento_candidate->IsValid()) {
        return memento_candidate;
      }
      return nullptr;
    default:
      UNREACHABLE();
  }
  UNREACHABLE();
  return nullptr;
}

template <Heap::UpdateAllocationSiteMode mode>
void Heap::UpdateAllocationSite(HeapObject* object,
                                base::HashMap* pretenuring_feedback) {
  DCHECK(InFromSpace(object));
  if (!FLAG_allocation_site_pretenuring ||
      !AllocationSite::CanTrack(object->map()->instance_type()))
    return;
  AllocationMemento* memento_candidate = FindAllocationMemento<kForGC>(object);
  if (memento_candidate == nullptr) return;

  if (mode == kGlobal) {
    DCHECK_EQ(pretenuring_feedback, global_pretenuring_feedback_);
    // Entering global pretenuring feedback is only used in the scavenger, where
    // we are allowed to actually touch the allocation site.
    if (!memento_candidate->IsValid()) return;
    AllocationSite* site = memento_candidate->GetAllocationSite();
    DCHECK(!site->IsZombie());
    // For inserting in the global pretenuring storage we need to first
    // increment the memento found count on the allocation site.
    if (site->IncrementMementoFoundCount()) {
      global_pretenuring_feedback_->LookupOrInsert(site,
                                                   ObjectHash(site->address()));
    }
  } else {
    DCHECK_EQ(mode, kCached);
    DCHECK_NE(pretenuring_feedback, global_pretenuring_feedback_);
    // Entering cached feedback is used in the parallel case. We are not allowed
    // to dereference the allocation site and rather have to postpone all checks
    // till actually merging the data.
    Address key = memento_candidate->GetAllocationSiteUnchecked();
    base::HashMap::Entry* e =
        pretenuring_feedback->LookupOrInsert(key, ObjectHash(key));
    DCHECK(e != nullptr);
    (*bit_cast<intptr_t*>(&e->value))++;
  }
}


void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
  global_pretenuring_feedback_->Remove(
      site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
}


bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
                          const v8::GCCallbackFlags callbackFlags) {
  const char* collector_reason = NULL;
  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
  return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
}


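// Heap is embedded inside Isolate, so the Isolate can be recovered from a
// Heap pointer by subtracting the offset of the heap_ field. The expression
// below computes that offset without a real Isolate by pretending one lives
// at address 16 (16 rather than 0, to avoid null-pointer semantics):
// isolate = this - (address of heap_ in the fake isolate - 16).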
Isolate* Heap::isolate() {
  return reinterpret_cast<Isolate*>(
      reinterpret_cast<intptr_t>(this) -
      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
}


void Heap::ExternalStringTable::AddString(String* string) {
  DCHECK(string->IsExternalString());
  if (heap_->InNewSpace(string)) {
    new_space_strings_.Add(string);
  } else {
    old_space_strings_.Add(string);
  }
}


void Heap::ExternalStringTable::Iterate(ObjectVisitor* v) {
  if (!new_space_strings_.is_empty()) {
    Object** start = &new_space_strings_[0];
    v->VisitPointers(start, start + new_space_strings_.length());
  }
  if (!old_space_strings_.is_empty()) {
    Object** start = &old_space_strings_[0];
    v->VisitPointers(start, start + old_space_strings_.length());
  }
}


// Verify() is inline to avoid ifdef-s around its calls in release
// mode.
void Heap::ExternalStringTable::Verify() {
#ifdef DEBUG
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    Object* obj = Object::cast(new_space_strings_[i]);
    DCHECK(heap_->InNewSpace(obj));
    DCHECK(!obj->IsTheHole(heap_->isolate()));
  }
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    Object* obj = Object::cast(old_space_strings_[i]);
    DCHECK(!heap_->InNewSpace(obj));
    DCHECK(!obj->IsTheHole(heap_->isolate()));
  }
#endif
}


void Heap::ExternalStringTable::AddOldString(String* string) {
  DCHECK(string->IsExternalString());
  DCHECK(!heap_->InNewSpace(string));
  old_space_strings_.Add(string);
}


void Heap::ExternalStringTable::ShrinkNewStrings(int position) {
  new_space_strings_.Rewind(position);
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}

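// DescriptorLookupCache maps a (map, name) pair to a descriptor index. The
// hash drops the pointer's always-zero alignment bits (>> kPointerSizeLog2),
// mixes in the name's hash, and reduces modulo the cache size kLength.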
// static
int DescriptorLookupCache::Hash(Object* source, Name* name) {
  DCHECK(name->IsUniqueName());
  // Uses only lower 32 bits if pointers are larger.
  uint32_t source_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
      kPointerSizeLog2;
  uint32_t name_hash = name->hash_field();
  return (source_hash ^ name_hash) % kLength;
}

int DescriptorLookupCache::Lookup(Map* source, Name* name) {
  int index = Hash(source, name);
  Key& key = keys_[index];
  if ((key.source == source) && (key.name == name)) return results_[index];
  return kAbsent;
}


void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
  DCHECK(result != kAbsent);
  int index = Hash(source, name);
  Key& key = keys_[index];
  key.source = source;
  key.name = name;
  results_[index] = result;
}


void Heap::ClearInstanceofCache() {
  set_instanceof_cache_function(Smi::FromInt(0));
}

Oddball* Heap::ToBoolean(bool condition) {
  return condition ? true_value() : false_value();
}


void Heap::CompletelyClearInstanceofCache() {
  set_instanceof_cache_map(Smi::FromInt(0));
  set_instanceof_cache_function(Smi::FromInt(0));
}


uint32_t Heap::HashSeed() {
  uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
  DCHECK(FLAG_randomize_hashes || seed == 0);
  return seed;
}


int Heap::NextScriptId() {
  int last_id = last_script_id()->value();
  if (last_id == Smi::kMaxValue) {
    last_id = 1;
  } else {
    last_id++;
  }
  set_last_script_id(Smi::FromInt(last_id));
  return last_id;
}

void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
  DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
  set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetConstructStubDeoptPCOffset(int pc_offset) {
  DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
  set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
  DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
  set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
  DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
  set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
  DCHECK(interpreter_entry_return_pc_offset() == Smi::FromInt(0));
  set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}

int Heap::GetNextTemplateSerialNumber() {
  int next_serial_number = next_template_serial_number()->value() + 1;
  set_next_template_serial_number(Smi::FromInt(next_serial_number));
  return next_serial_number;
}

void Heap::SetSerializedTemplates(FixedArray* templates) {
  DCHECK_EQ(empty_fixed_array(), serialized_templates());
  set_serialized_templates(templates);
}

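// While at least one AlwaysAllocateScope is alive, always_allocate() is true
// (the count below is non-zero) and the heap suppresses synthetic allocation
// failures such as the --gc-interval timeout in AllocateRaw above.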
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
    : heap_(isolate->heap()) {
  heap_->always_allocate_scope_count_.Increment(1);
}


AlwaysAllocateScope::~AlwaysAllocateScope() {
  heap_->always_allocate_scope_count_.Increment(-1);
}


void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    if ((*current)->IsHeapObject()) {
      HeapObject* object = HeapObject::cast(*current);
      CHECK(object->GetIsolate()->heap()->Contains(object));
      CHECK(object->map()->IsMap());
    }
  }
}


void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    CHECK((*current)->IsSmi());
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_INL_H_