// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scanner-base.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {

static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;


static Mutex* gc_initializer_mutex = OS::CreateMutex();


Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
      reserved_semispace_size_(2*MB),
      max_semispace_size_(2*MB),
      initial_semispace_size_(128*KB),
      max_old_generation_size_(192*MB),
      max_executable_size_(max_old_generation_size_),
      code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
      reserved_semispace_size_(16*MB),
      max_semispace_size_(16*MB),
      initial_semispace_size_(1*MB),
      max_old_generation_size_(1*GB),
      max_executable_size_(256*MB),
      code_range_size_(512*MB),
#else
      reserved_semispace_size_(8*MB),
      max_semispace_size_(8*MB),
      initial_semispace_size_(512*KB),
      max_old_generation_size_(512*MB),
      max_executable_size_(128*MB),
      code_range_size_(0),
#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      mc_count_(0),
      ms_count_(0),
      gc_count_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_allowed_(true),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
      debug_utils_(NULL),
#endif  // DEBUG
      old_gen_promotion_limit_(kMinimumPromotionLimit),
      old_gen_allocation_limit_(kMinimumAllocationLimit),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      hidden_symbol_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_gc_pause_(0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      configured_(false),
      is_safe_to_read_maps_(true) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  global_contexts_list_ = NULL;
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}

intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}

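// Size query usable while a full GC is in progress: the object's map word
// may carry the mark and overflow bits at that point, so they are cleared
// before the map is dereferenced to compute the size.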
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}

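// Variant used while map pointers are encoded during a compacting collection:
// a free region left behind by the collector is recognized by its marker word
// (either a single free word, or a multi-word block whose size is stored in
// the word that follows the marker); any other object decodes its map address
// from the encoded map word before computing its size.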
int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         isolate_->memory_allocator()->Size(),
         isolate_->memory_allocator()->Available());
  PrintF("New space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  isolate_->transcendental_cache()->Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif

  LiveObjectList::GCPrologue();
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
  LiveObjectList::GCEpilogue();
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->symbol_table_capacity()->Set(
      symbol_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  mark_compact_collector_.SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector()->SetForceCompaction(true);

  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC.  Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callbacks invocations.
  // Therefore stop recollecting after several attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  mark_compact_collector()->SetForceCompaction(false);
}


bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(isolate_, GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this);
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? isolate_->counters()->gc_scavenger()
        : isolate_->counters()->gc_compactor();
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
#endif

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer(this);
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  HEAP->symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation.  This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}



void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

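// Recomputes the young-generation survival rate after a collection:
// survival_rate_ is the percentage of the new-space bytes recorded at the
// start of the collection that were still live afterwards, and the trend
// (INCREASING, DECREASING or STABLE) is set by comparing it against the
// previously recorded rate.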
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
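    // Example: with 30 MB of promoted space the next mark-sweep is requested
    // after roughly 10 MB of further promotions or 15 MB of further old-space
    // allocation (before the possible doubling below).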

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that mutator is either building or modifying
      // a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  bool is_compacting = mark_compact_collector_.IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  is_safe_to_read_maps_ = false;
  mark_compact_collector_.CollectGarbage();
  is_safe_to_read_maps_ = true;

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(HEAP->code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(HEAP->old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  SwitchScavengingVisitorsTableIfProfilingWasEnabled();

  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark. This guarantees that dirty regions iteration will use a
  // correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue_.Initialize(new_space_.ToSpaceHigh());

  is_safe_to_read_maps_ = false;
  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.  By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &Heap::IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge object reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  LiveObjectList::UpdateReferencesForScavengeGC();
  isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();

  ASSERT(new_space_front == new_space_.top());

  is_safe_to_read_maps_ = true;

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  external_string_table_.Verify();

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


static Object* ProcessFunctionWeakReferences(Heap* heap,
                                             Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* head = heap->undefined_value();
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (candidate != heap->undefined_value()) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == heap->undefined_value()) {
        // First element in the list.
        head = candidate_function;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(candidate_function);
      }
      // Retained function is new tail.
      tail = candidate_function;
    }
    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(heap->undefined_value());
  }

  return head;
}

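// The global contexts form a weak list threaded through each context's
// NEXT_CONTEXT_LINK slot, and each context in turn carries a weak list of
// its optimized functions in OPTIMIZED_FUNCTIONS_LIST.  Both lists are
// rebuilt here, keeping only the elements the retainer chooses to keep.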
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (candidate != undefined_value()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined_value()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(this,
                            Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      tail = candidate_context;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              this,
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer);
      candidate_context->set_unchecked(this,
                                       Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
    }
    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(this,
                        Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  global_contexts_list_ = head;
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue_.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue_.remove(&target, &size);

      // A promoted object might already have been partially visited
      // during dirty regions iteration.  Thus we search specifically
      // for pointers to from semispace instead of looking for pointers
      // to new space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  return new_space_front;
}


enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


1244typedef void (*ScavengingCallback)(Map* map,
1245 HeapObject** slot,
1246 HeapObject* object);
1247
1248
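// Shared dispatch table of scavenging callbacks, plus an atomic flag that
// records which variant (logging/profiling enabled or disabled) is installed.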
1249static Atomic32 scavenging_visitors_table_mode_;
1250static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
1251
1252
1253INLINE(static void DoScavengeObject(Map* map,
1254 HeapObject** slot,
1255 HeapObject* obj));
1256
1257
1258void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
1259 scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
1260}
1261
1262
1263template<LoggingAndProfiling logging_and_profiling_mode>
Iain Merrick75681382010-08-19 15:07:18 +01001264class ScavengingVisitor : public StaticVisitorBase {
1265 public:
1266 static void Initialize() {
1267 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1268 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1269 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1270 table_.Register(kVisitByteArray, &EvacuateByteArray);
1271 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
Ben Murdoch8b112d22011-06-08 16:22:53 +01001272
Ben Murdochf87a2032010-10-22 12:50:53 +01001273 table_.Register(kVisitGlobalContext,
1274 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001275 template VisitSpecialized<Context::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001276
1277 table_.Register(kVisitConsString,
1278 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001279 template VisitSpecialized<ConsString::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001280
1281 table_.Register(kVisitSharedFunctionInfo,
1282 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001283 template VisitSpecialized<SharedFunctionInfo::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001284
1285 table_.Register(kVisitJSFunction,
1286 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001287 template VisitSpecialized<JSFunction::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001288
1289 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1290 kVisitDataObject,
1291 kVisitDataObjectGeneric>();
1292
1293 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1294 kVisitJSObject,
1295 kVisitJSObjectGeneric>();
1296
1297 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1298 kVisitStruct,
1299 kVisitStructGeneric>();
1300 }
1301
Ben Murdoch8b112d22011-06-08 16:22:53 +01001302 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1303 return &table_;
Iain Merrick75681382010-08-19 15:07:18 +01001304 }
1305
Iain Merrick75681382010-08-19 15:07:18 +01001306 private:
1307 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1308 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1309
Steve Blocka7e24c12009-10-30 11:49:00 +00001310#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Steve Block44f0eee2011-05-26 01:26:41 +01001311 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
Iain Merrick75681382010-08-19 15:07:18 +01001312 bool should_record = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001313#ifdef DEBUG
Iain Merrick75681382010-08-19 15:07:18 +01001314 should_record = FLAG_heap_stats;
Steve Blocka7e24c12009-10-30 11:49:00 +00001315#endif
1316#ifdef ENABLE_LOGGING_AND_PROFILING
Iain Merrick75681382010-08-19 15:07:18 +01001317 should_record = should_record || FLAG_log_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00001318#endif
Iain Merrick75681382010-08-19 15:07:18 +01001319 if (should_record) {
Steve Block44f0eee2011-05-26 01:26:41 +01001320 if (heap->new_space()->Contains(obj)) {
1321 heap->new_space()->RecordAllocation(obj);
Iain Merrick75681382010-08-19 15:07:18 +01001322 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001323 heap->new_space()->RecordPromotion(obj);
Iain Merrick75681382010-08-19 15:07:18 +01001324 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001325 }
1326 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001327#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1328
Iain Merrick75681382010-08-19 15:07:18 +01001329 // Helper function used by CopyObject to copy a source object to an
1330 // allocated target object and update the forwarding pointer in the source
1331 // object. Returns the target object.
Steve Block44f0eee2011-05-26 01:26:41 +01001332 INLINE(static HeapObject* MigrateObject(Heap* heap,
1333 HeapObject* source,
Iain Merrick75681382010-08-19 15:07:18 +01001334 HeapObject* target,
1335 int size)) {
1336 // Copy the content of source to target.
Steve Block44f0eee2011-05-26 01:26:41 +01001337 heap->CopyBlock(target->address(), source->address(), size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001338
Iain Merrick75681382010-08-19 15:07:18 +01001339 // Set the forwarding address.
1340 source->set_map_word(MapWord::FromForwardingAddress(target));
Steve Blocka7e24c12009-10-30 11:49:00 +00001341
Ben Murdoch8b112d22011-06-08 16:22:53 +01001342 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001343#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Ben Murdoch8b112d22011-06-08 16:22:53 +01001344 // Update NewSpace stats if necessary.
1345 RecordCopiedObject(heap, target);
Steve Blocka7e24c12009-10-30 11:49:00 +00001346#endif
Ben Murdoch8b112d22011-06-08 16:22:53 +01001347 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001348#if defined(ENABLE_LOGGING_AND_PROFILING)
Ben Murdoch8b112d22011-06-08 16:22:53 +01001349 Isolate* isolate = heap->isolate();
1350 if (isolate->logger()->is_logging() ||
1351 isolate->cpu_profiler()->is_profiling()) {
1352 if (target->IsSharedFunctionInfo()) {
1353 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1354 source->address(), target->address()));
1355 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001356 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001357#endif
Ben Murdoch8b112d22011-06-08 16:22:53 +01001358 }
1359
Iain Merrick75681382010-08-19 15:07:18 +01001360 return target;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001361 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001362
1363
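  // Copies the object either into an old space (when promotion is
  // appropriate) or back into to-space, updating *slot and the forwarding
  // pointer via MigrateObject.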
Iain Merrick75681382010-08-19 15:07:18 +01001364 template<ObjectContents object_contents, SizeRestriction size_restriction>
1365 static inline void EvacuateObject(Map* map,
1366 HeapObject** slot,
1367 HeapObject* object,
1368 int object_size) {
1369 ASSERT((size_restriction != SMALL) ||
1370 (object_size <= Page::kMaxHeapObjectSize));
1371 ASSERT(object->Size() == object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001372
Steve Block44f0eee2011-05-26 01:26:41 +01001373 Heap* heap = map->heap();
1374 if (heap->ShouldBePromoted(object->address(), object_size)) {
John Reck59135872010-11-02 12:39:01 -07001375 MaybeObject* maybe_result;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001376
Iain Merrick75681382010-08-19 15:07:18 +01001377 if ((size_restriction != SMALL) &&
1378 (object_size > Page::kMaxHeapObjectSize)) {
Steve Block44f0eee2011-05-26 01:26:41 +01001379 maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001380 } else {
Iain Merrick75681382010-08-19 15:07:18 +01001381 if (object_contents == DATA_OBJECT) {
Steve Block44f0eee2011-05-26 01:26:41 +01001382 maybe_result = heap->old_data_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001383 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001384 maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001385 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001386 }
1387
John Reck59135872010-11-02 12:39:01 -07001388 Object* result = NULL; // Initialization to please compiler.
1389 if (maybe_result->ToObject(&result)) {
Iain Merrick75681382010-08-19 15:07:18 +01001390 HeapObject* target = HeapObject::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01001391 *slot = MigrateObject(heap, object, target, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001392
Iain Merrick75681382010-08-19 15:07:18 +01001393 if (object_contents == POINTER_OBJECT) {
Steve Block44f0eee2011-05-26 01:26:41 +01001394 heap->promotion_queue()->insert(target, object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001395 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001396
Steve Block44f0eee2011-05-26 01:26:41 +01001397 heap->tracer()->increment_promoted_objects_size(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001398 return;
1399 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001400 }
John Reck59135872010-11-02 12:39:01 -07001401 Object* result =
Steve Block44f0eee2011-05-26 01:26:41 +01001402 heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
1403 *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001404 return;
1405 }
1406
Iain Merrick75681382010-08-19 15:07:18 +01001407
1408 static inline void EvacuateFixedArray(Map* map,
1409 HeapObject** slot,
1410 HeapObject* object) {
1411 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1412 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1413 slot,
1414 object,
1415 object_size);
1416 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001417
1418
Iain Merrick75681382010-08-19 15:07:18 +01001419 static inline void EvacuateByteArray(Map* map,
1420 HeapObject** slot,
1421 HeapObject* object) {
1422 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1423 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1424 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001425
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001426
Iain Merrick75681382010-08-19 15:07:18 +01001427 static inline void EvacuateSeqAsciiString(Map* map,
1428 HeapObject** slot,
1429 HeapObject* object) {
1430 int object_size = SeqAsciiString::cast(object)->
1431 SeqAsciiStringSize(map->instance_type());
1432 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1433 }
1434
1435
1436 static inline void EvacuateSeqTwoByteString(Map* map,
1437 HeapObject** slot,
1438 HeapObject* object) {
1439 int object_size = SeqTwoByteString::cast(object)->
1440 SeqTwoByteStringSize(map->instance_type());
1441 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1442 }
1443
1444
1445 static inline bool IsShortcutCandidate(int type) {
1446 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1447 }
1448
1449 static inline void EvacuateShortcutCandidate(Map* map,
1450 HeapObject** slot,
1451 HeapObject* object) {
1452 ASSERT(IsShortcutCandidate(map->instance_type()));
1453
Steve Block44f0eee2011-05-26 01:26:41 +01001454 if (ConsString::cast(object)->unchecked_second() ==
1455 map->heap()->empty_string()) {
Iain Merrick75681382010-08-19 15:07:18 +01001456 HeapObject* first =
1457 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1458
1459 *slot = first;
1460
Steve Block44f0eee2011-05-26 01:26:41 +01001461 if (!map->heap()->InNewSpace(first)) {
Iain Merrick75681382010-08-19 15:07:18 +01001462 object->set_map_word(MapWord::FromForwardingAddress(first));
1463 return;
1464 }
1465
1466 MapWord first_word = first->map_word();
1467 if (first_word.IsForwardingAddress()) {
1468 HeapObject* target = first_word.ToForwardingAddress();
1469
1470 *slot = target;
1471 object->set_map_word(MapWord::FromForwardingAddress(target));
1472 return;
1473 }
1474
Ben Murdoch8b112d22011-06-08 16:22:53 +01001475 DoScavengeObject(first->map(), slot, first);
Iain Merrick75681382010-08-19 15:07:18 +01001476 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1477 return;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001478 }
Iain Merrick75681382010-08-19 15:07:18 +01001479
1480 int object_size = ConsString::kSize;
1481 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001482 }
1483
Iain Merrick75681382010-08-19 15:07:18 +01001484 template<ObjectContents object_contents>
1485 class ObjectEvacuationStrategy {
1486 public:
1487 template<int object_size>
1488 static inline void VisitSpecialized(Map* map,
1489 HeapObject** slot,
1490 HeapObject* object) {
1491 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1492 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001493
Iain Merrick75681382010-08-19 15:07:18 +01001494 static inline void Visit(Map* map,
1495 HeapObject** slot,
1496 HeapObject* object) {
1497 int object_size = map->instance_size();
1498 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1499 }
1500 };
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001501
Ben Murdoch8b112d22011-06-08 16:22:53 +01001502 static VisitorDispatchTable<ScavengingCallback> table_;
Iain Merrick75681382010-08-19 15:07:18 +01001503};
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001504
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001505
Ben Murdoch8b112d22011-06-08 16:22:53 +01001506template<LoggingAndProfiling logging_and_profiling_mode>
1507VisitorDispatchTable<ScavengingCallback>
1508 ScavengingVisitor<logging_and_profiling_mode>::table_;
1509
1510
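// Initializes both ScavengingVisitor specializations and installs the
// non-logging variant as the initial shared dispatch table.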
1511static void InitializeScavengingVisitorsTables() {
1512 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
1513 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
1514 scavenging_visitors_table_.CopyFrom(
1515 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
1516 scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
1517}
1518
1519
1520void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
1521 if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
1522 // Table was already updated by some isolate.
1523 return;
1524 }
1525
1526 if (isolate()->logger()->is_logging() ||
1527 isolate()->cpu_profiler()->is_profiling() ||
1528 (isolate()->heap_profiler() != NULL &&
1529 isolate()->heap_profiler()->is_profiling())) {
1530 // If one of the isolates is doing a scavenge at this moment, it
1531 // might see this table in an inconsistent state in which
1532 // some of the callbacks point to
1533 // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
1534 // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
1535 // However, this does not lead to any bugs because such an isolate does
1536 // not have profiling enabled, and any isolate with profiling enabled is
1537 // guaranteed to see the table in a consistent state.
1538 scavenging_visitors_table_.CopyFrom(
1539 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
1540
1541 // We use Release_Store to prevent reordering of this write before writes
1542 // to the table.
1543 Release_Store(&scavenging_visitors_table_mode_,
1544 LOGGING_AND_PROFILING_ENABLED);
1545 }
1546}
Steve Blocka7e24c12009-10-30 11:49:00 +00001547
1548
1549void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
Steve Block44f0eee2011-05-26 01:26:41 +01001550 ASSERT(HEAP->InFromSpace(object));
Steve Blocka7e24c12009-10-30 11:49:00 +00001551 MapWord first_word = object->map_word();
1552 ASSERT(!first_word.IsForwardingAddress());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001553 Map* map = first_word.ToMap();
Ben Murdoch8b112d22011-06-08 16:22:53 +01001554 DoScavengeObject(map, p, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001555}
1556
1557
John Reck59135872010-11-02 12:39:01 -07001558MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1559 int instance_size) {
1560 Object* result;
1561 { MaybeObject* maybe_result = AllocateRawMap();
1562 if (!maybe_result->ToObject(&result)) return maybe_result;
1563 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001564
1565 // Map::cast cannot be used due to uninitialized map field.
1566 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1567 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1568 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
Steve Block44f0eee2011-05-26 01:26:41 +01001569 reinterpret_cast<Map*>(result)->set_visitor_id(
1570 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001571 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001572 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001573 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001574 reinterpret_cast<Map*>(result)->set_bit_field(0);
1575 reinterpret_cast<Map*>(result)->set_bit_field2(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001576 return result;
1577}
1578
1579
John Reck59135872010-11-02 12:39:01 -07001580MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1581 Object* result;
1582 { MaybeObject* maybe_result = AllocateRawMap();
1583 if (!maybe_result->ToObject(&result)) return maybe_result;
1584 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001585
1586 Map* map = reinterpret_cast<Map*>(result);
1587 map->set_map(meta_map());
1588 map->set_instance_type(instance_type);
Iain Merrick75681382010-08-19 15:07:18 +01001589 map->set_visitor_id(
1590 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001591 map->set_prototype(null_value());
1592 map->set_constructor(null_value());
1593 map->set_instance_size(instance_size);
1594 map->set_inobject_properties(0);
1595 map->set_pre_allocated_property_fields(0);
1596 map->set_instance_descriptors(empty_descriptor_array());
1597 map->set_code_cache(empty_fixed_array());
Steve Block053d10c2011-06-13 19:13:29 +01001598 map->set_prototype_transitions(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001599 map->set_unused_property_fields(0);
1600 map->set_bit_field(0);
Steve Block8defd9f2010-07-08 12:39:36 +01001601 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
Leon Clarkee46be812010-01-19 14:06:41 +00001602
1603 // If the map object is aligned, fill the padding area with Smi 0 objects.
1604 if (Map::kPadStart < Map::kSize) {
1605 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1606 0,
1607 Map::kSize - Map::kPadStart);
1608 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001609 return map;
1610}
1611
1612
John Reck59135872010-11-02 12:39:01 -07001613MaybeObject* Heap::AllocateCodeCache() {
1614 Object* result;
1615 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1616 if (!maybe_result->ToObject(&result)) return maybe_result;
1617 }
Steve Block6ded16b2010-05-10 14:33:55 +01001618 CodeCache* code_cache = CodeCache::cast(result);
1619 code_cache->set_default_cache(empty_fixed_array());
1620 code_cache->set_normal_type_cache(undefined_value());
1621 return code_cache;
1622}
1623
1624
Steve Blocka7e24c12009-10-30 11:49:00 +00001625const Heap::StringTypeTable Heap::string_type_table[] = {
1626#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1627 {type, size, k##camel_name##MapRootIndex},
1628 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1629#undef STRING_TYPE_ELEMENT
1630};
1631
1632
1633const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1634#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1635 {contents, k##name##RootIndex},
1636 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1637#undef CONSTANT_SYMBOL_ELEMENT
1638};
1639
1640
1641const Heap::StructTable Heap::struct_table[] = {
1642#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1643 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1644 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1645#undef STRUCT_TABLE_ELEMENT
1646};
1647
1648
1649bool Heap::CreateInitialMaps() {
John Reck59135872010-11-02 12:39:01 -07001650 Object* obj;
1651 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1652 if (!maybe_obj->ToObject(&obj)) return false;
1653 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001654 // Map::cast cannot be used due to uninitialized map field.
1655 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1656 set_meta_map(new_meta_map);
1657 new_meta_map->set_map(new_meta_map);
1658
John Reck59135872010-11-02 12:39:01 -07001659 { MaybeObject* maybe_obj =
1660 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1661 if (!maybe_obj->ToObject(&obj)) return false;
1662 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001663 set_fixed_array_map(Map::cast(obj));
1664
John Reck59135872010-11-02 12:39:01 -07001665 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1666 if (!maybe_obj->ToObject(&obj)) return false;
1667 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001668 set_oddball_map(Map::cast(obj));
1669
Steve Block6ded16b2010-05-10 14:33:55 +01001670 // Allocate the empty array.
John Reck59135872010-11-02 12:39:01 -07001671 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1672 if (!maybe_obj->ToObject(&obj)) return false;
1673 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001674 set_empty_fixed_array(FixedArray::cast(obj));
1675
John Reck59135872010-11-02 12:39:01 -07001676 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1677 if (!maybe_obj->ToObject(&obj)) return false;
1678 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001679 set_null_value(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01001680 Oddball::cast(obj)->set_kind(Oddball::kNull);
Steve Blocka7e24c12009-10-30 11:49:00 +00001681
1682 // Allocate the empty descriptor array.
John Reck59135872010-11-02 12:39:01 -07001683 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1684 if (!maybe_obj->ToObject(&obj)) return false;
1685 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001686 set_empty_descriptor_array(DescriptorArray::cast(obj));
1687
1688 // Fix the instance_descriptors for the existing maps.
1689 meta_map()->set_instance_descriptors(empty_descriptor_array());
1690 meta_map()->set_code_cache(empty_fixed_array());
Steve Block053d10c2011-06-13 19:13:29 +01001691 meta_map()->set_prototype_transitions(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001692
1693 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1694 fixed_array_map()->set_code_cache(empty_fixed_array());
Steve Block053d10c2011-06-13 19:13:29 +01001695 fixed_array_map()->set_prototype_transitions(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001696
1697 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1698 oddball_map()->set_code_cache(empty_fixed_array());
Steve Block053d10c2011-06-13 19:13:29 +01001699 oddball_map()->set_prototype_transitions(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001700
1701 // Fix prototype object for existing maps.
1702 meta_map()->set_prototype(null_value());
1703 meta_map()->set_constructor(null_value());
1704
1705 fixed_array_map()->set_prototype(null_value());
1706 fixed_array_map()->set_constructor(null_value());
1707
1708 oddball_map()->set_prototype(null_value());
1709 oddball_map()->set_constructor(null_value());
1710
John Reck59135872010-11-02 12:39:01 -07001711 { MaybeObject* maybe_obj =
1712 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1713 if (!maybe_obj->ToObject(&obj)) return false;
1714 }
Iain Merrick75681382010-08-19 15:07:18 +01001715 set_fixed_cow_array_map(Map::cast(obj));
1716 ASSERT(fixed_array_map() != fixed_cow_array_map());
1717
John Reck59135872010-11-02 12:39:01 -07001718 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1719 if (!maybe_obj->ToObject(&obj)) return false;
1720 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001721 set_heap_number_map(Map::cast(obj));
1722
John Reck59135872010-11-02 12:39:01 -07001723 { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1724 if (!maybe_obj->ToObject(&obj)) return false;
1725 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001726 set_proxy_map(Map::cast(obj));
1727
1728 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1729 const StringTypeTable& entry = string_type_table[i];
John Reck59135872010-11-02 12:39:01 -07001730 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1731 if (!maybe_obj->ToObject(&obj)) return false;
1732 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001733 roots_[entry.index] = Map::cast(obj);
1734 }
1735
John Reck59135872010-11-02 12:39:01 -07001736 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1737 if (!maybe_obj->ToObject(&obj)) return false;
1738 }
Steve Blockd0582a62009-12-15 09:54:21 +00001739 set_undetectable_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001740 Map::cast(obj)->set_is_undetectable();
1741
John Reck59135872010-11-02 12:39:01 -07001742 { MaybeObject* maybe_obj =
1743 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1744 if (!maybe_obj->ToObject(&obj)) return false;
1745 }
Steve Blockd0582a62009-12-15 09:54:21 +00001746 set_undetectable_ascii_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001747 Map::cast(obj)->set_is_undetectable();
1748
John Reck59135872010-11-02 12:39:01 -07001749 { MaybeObject* maybe_obj =
1750 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1751 if (!maybe_obj->ToObject(&obj)) return false;
1752 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001753 set_byte_array_map(Map::cast(obj));
1754
Ben Murdochb0fe1622011-05-05 13:52:32 +01001755 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1756 if (!maybe_obj->ToObject(&obj)) return false;
1757 }
1758 set_empty_byte_array(ByteArray::cast(obj));
1759
John Reck59135872010-11-02 12:39:01 -07001760 { MaybeObject* maybe_obj =
Steve Block44f0eee2011-05-26 01:26:41 +01001761 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
John Reck59135872010-11-02 12:39:01 -07001762 if (!maybe_obj->ToObject(&obj)) return false;
1763 }
Steve Block44f0eee2011-05-26 01:26:41 +01001764 set_external_pixel_array_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001765
John Reck59135872010-11-02 12:39:01 -07001766 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1767 ExternalArray::kAlignedSize);
1768 if (!maybe_obj->ToObject(&obj)) return false;
1769 }
Steve Block3ce2e202009-11-05 08:53:23 +00001770 set_external_byte_array_map(Map::cast(obj));
1771
John Reck59135872010-11-02 12:39:01 -07001772 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1773 ExternalArray::kAlignedSize);
1774 if (!maybe_obj->ToObject(&obj)) return false;
1775 }
Steve Block3ce2e202009-11-05 08:53:23 +00001776 set_external_unsigned_byte_array_map(Map::cast(obj));
1777
John Reck59135872010-11-02 12:39:01 -07001778 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1779 ExternalArray::kAlignedSize);
1780 if (!maybe_obj->ToObject(&obj)) return false;
1781 }
Steve Block3ce2e202009-11-05 08:53:23 +00001782 set_external_short_array_map(Map::cast(obj));
1783
John Reck59135872010-11-02 12:39:01 -07001784 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1785 ExternalArray::kAlignedSize);
1786 if (!maybe_obj->ToObject(&obj)) return false;
1787 }
Steve Block3ce2e202009-11-05 08:53:23 +00001788 set_external_unsigned_short_array_map(Map::cast(obj));
1789
John Reck59135872010-11-02 12:39:01 -07001790 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1791 ExternalArray::kAlignedSize);
1792 if (!maybe_obj->ToObject(&obj)) return false;
1793 }
Steve Block3ce2e202009-11-05 08:53:23 +00001794 set_external_int_array_map(Map::cast(obj));
1795
John Reck59135872010-11-02 12:39:01 -07001796 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1797 ExternalArray::kAlignedSize);
1798 if (!maybe_obj->ToObject(&obj)) return false;
1799 }
Steve Block3ce2e202009-11-05 08:53:23 +00001800 set_external_unsigned_int_array_map(Map::cast(obj));
1801
John Reck59135872010-11-02 12:39:01 -07001802 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1803 ExternalArray::kAlignedSize);
1804 if (!maybe_obj->ToObject(&obj)) return false;
1805 }
Steve Block3ce2e202009-11-05 08:53:23 +00001806 set_external_float_array_map(Map::cast(obj));
1807
John Reck59135872010-11-02 12:39:01 -07001808 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1809 if (!maybe_obj->ToObject(&obj)) return false;
1810 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001811 set_code_map(Map::cast(obj));
1812
John Reck59135872010-11-02 12:39:01 -07001813 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1814 JSGlobalPropertyCell::kSize);
1815 if (!maybe_obj->ToObject(&obj)) return false;
1816 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001817 set_global_property_cell_map(Map::cast(obj));
1818
John Reck59135872010-11-02 12:39:01 -07001819 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
1820 if (!maybe_obj->ToObject(&obj)) return false;
1821 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001822 set_one_pointer_filler_map(Map::cast(obj));
1823
John Reck59135872010-11-02 12:39:01 -07001824 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1825 if (!maybe_obj->ToObject(&obj)) return false;
1826 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001827 set_two_pointer_filler_map(Map::cast(obj));
1828
1829 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1830 const StructTable& entry = struct_table[i];
John Reck59135872010-11-02 12:39:01 -07001831 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1832 if (!maybe_obj->ToObject(&obj)) return false;
1833 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001834 roots_[entry.index] = Map::cast(obj);
1835 }
1836
John Reck59135872010-11-02 12:39:01 -07001837 { MaybeObject* maybe_obj =
1838 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1839 if (!maybe_obj->ToObject(&obj)) return false;
1840 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001841 set_hash_table_map(Map::cast(obj));
1842
John Reck59135872010-11-02 12:39:01 -07001843 { MaybeObject* maybe_obj =
1844 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1845 if (!maybe_obj->ToObject(&obj)) return false;
1846 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001847 set_context_map(Map::cast(obj));
1848
John Reck59135872010-11-02 12:39:01 -07001849 { MaybeObject* maybe_obj =
1850 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1851 if (!maybe_obj->ToObject(&obj)) return false;
1852 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001853 set_catch_context_map(Map::cast(obj));
1854
John Reck59135872010-11-02 12:39:01 -07001855 { MaybeObject* maybe_obj =
1856 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1857 if (!maybe_obj->ToObject(&obj)) return false;
1858 }
Ben Murdochf87a2032010-10-22 12:50:53 +01001859 Map* global_context_map = Map::cast(obj);
1860 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
1861 set_global_context_map(global_context_map);
Steve Blocka7e24c12009-10-30 11:49:00 +00001862
John Reck59135872010-11-02 12:39:01 -07001863 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1864 SharedFunctionInfo::kAlignedSize);
1865 if (!maybe_obj->ToObject(&obj)) return false;
1866 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001867 set_shared_function_info_map(Map::cast(obj));
1868
Steve Block1e0659c2011-05-24 12:43:12 +01001869 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
1870 JSMessageObject::kSize);
1871 if (!maybe_obj->ToObject(&obj)) return false;
1872 }
1873 set_message_object_map(Map::cast(obj));
1874
Steve Block44f0eee2011-05-26 01:26:41 +01001875 ASSERT(!InNewSpace(empty_fixed_array()));
Steve Blocka7e24c12009-10-30 11:49:00 +00001876 return true;
1877}
1878
1879
John Reck59135872010-11-02 12:39:01 -07001880MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001881 // Statically ensure that it is safe to allocate heap numbers in paged
1882 // spaces.
1883 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1884 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1885
John Reck59135872010-11-02 12:39:01 -07001886 Object* result;
1887 { MaybeObject* maybe_result =
1888 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1889 if (!maybe_result->ToObject(&result)) return maybe_result;
1890 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001891
1892 HeapObject::cast(result)->set_map(heap_number_map());
1893 HeapNumber::cast(result)->set_value(value);
1894 return result;
1895}
1896
1897
John Reck59135872010-11-02 12:39:01 -07001898MaybeObject* Heap::AllocateHeapNumber(double value) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001899 // Use general version, if we're forced to always allocate.
1900 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1901
1902 // This version of AllocateHeapNumber is optimized for
1903 // allocation in new space.
1904 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1905 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
John Reck59135872010-11-02 12:39:01 -07001906 Object* result;
1907 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
1908 if (!maybe_result->ToObject(&result)) return maybe_result;
1909 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001910 HeapObject::cast(result)->set_map(heap_number_map());
1911 HeapNumber::cast(result)->set_value(value);
1912 return result;
1913}
1914
1915
John Reck59135872010-11-02 12:39:01 -07001916MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1917 Object* result;
1918 { MaybeObject* maybe_result = AllocateRawCell();
1919 if (!maybe_result->ToObject(&result)) return maybe_result;
1920 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001921 HeapObject::cast(result)->set_map(global_property_cell_map());
1922 JSGlobalPropertyCell::cast(result)->set_value(value);
1923 return result;
1924}
1925
1926
John Reck59135872010-11-02 12:39:01 -07001927MaybeObject* Heap::CreateOddball(const char* to_string,
Steve Block44f0eee2011-05-26 01:26:41 +01001928 Object* to_number,
1929 byte kind) {
John Reck59135872010-11-02 12:39:01 -07001930 Object* result;
1931 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
1932 if (!maybe_result->ToObject(&result)) return maybe_result;
1933 }
Steve Block44f0eee2011-05-26 01:26:41 +01001934 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
Steve Blocka7e24c12009-10-30 11:49:00 +00001935}
1936
1937
1938bool Heap::CreateApiObjects() {
1939 Object* obj;
1940
John Reck59135872010-11-02 12:39:01 -07001941 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1942 if (!maybe_obj->ToObject(&obj)) return false;
1943 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001944 set_neander_map(Map::cast(obj));
1945
Steve Block44f0eee2011-05-26 01:26:41 +01001946 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
John Reck59135872010-11-02 12:39:01 -07001947 if (!maybe_obj->ToObject(&obj)) return false;
1948 }
1949 Object* elements;
1950 { MaybeObject* maybe_elements = AllocateFixedArray(2);
1951 if (!maybe_elements->ToObject(&elements)) return false;
1952 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001953 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1954 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1955 set_message_listeners(JSObject::cast(obj));
1956
1957 return true;
1958}
1959
1960
Steve Blocka7e24c12009-10-30 11:49:00 +00001961void Heap::CreateJSEntryStub() {
1962 JSEntryStub stub;
1963 set_js_entry_code(*stub.GetCode());
1964}
1965
1966
1967void Heap::CreateJSConstructEntryStub() {
1968 JSConstructEntryStub stub;
1969 set_js_construct_entry_code(*stub.GetCode());
1970}
1971
1972
1973void Heap::CreateFixedStubs() {
1974 // Here we create roots for fixed stubs. They are needed at GC
1975 // for cooking and uncooking (check out frames.cc).
1976 // This eliminates the need for doing a dictionary lookup in the
1977 // stub cache for these stubs.
1978 HandleScope scope;
1979 // gcc-4.4 has problems generating correct code for the following snippet:
Steve Block44f0eee2011-05-26 01:26:41 +01001980 // { JSEntryStub stub;
1981 // js_entry_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001982 // }
Steve Block44f0eee2011-05-26 01:26:41 +01001983 // { JSConstructEntryStub stub;
1984 // js_construct_entry_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001985 // }
1986 // To work around the problem, make separate functions without inlining.
Steve Blocka7e24c12009-10-30 11:49:00 +00001987 Heap::CreateJSEntryStub();
1988 Heap::CreateJSConstructEntryStub();
Steve Blocka7e24c12009-10-30 11:49:00 +00001989}
1990
1991
1992bool Heap::CreateInitialObjects() {
1993 Object* obj;
1994
1995 // The -0 value must be set before NumberFromDouble works.
John Reck59135872010-11-02 12:39:01 -07001996 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
1997 if (!maybe_obj->ToObject(&obj)) return false;
1998 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001999 set_minus_zero_value(obj);
2000 ASSERT(signbit(minus_zero_value()->Number()) != 0);
2001
John Reck59135872010-11-02 12:39:01 -07002002 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2003 if (!maybe_obj->ToObject(&obj)) return false;
2004 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002005 set_nan_value(obj);
2006
John Reck59135872010-11-02 12:39:01 -07002007 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
2008 if (!maybe_obj->ToObject(&obj)) return false;
2009 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002010 set_undefined_value(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01002011 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
Steve Blocka7e24c12009-10-30 11:49:00 +00002012 ASSERT(!InNewSpace(undefined_value()));
2013
2014 // Allocate initial symbol table.
John Reck59135872010-11-02 12:39:01 -07002015 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2016 if (!maybe_obj->ToObject(&obj)) return false;
2017 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002018 // Don't use set_symbol_table() due to asserts.
2019 roots_[kSymbolTableRootIndex] = obj;
2020
2021 // Assign the print strings for oddballs after creating the symbol table.
John Reck59135872010-11-02 12:39:01 -07002022 Object* symbol;
2023 { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
2024 if (!maybe_symbol->ToObject(&symbol)) return false;
2025 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002026 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
2027 Oddball::cast(undefined_value())->set_to_number(nan_value());
2028
Steve Blocka7e24c12009-10-30 11:49:00 +00002029 // Allocate the null_value
John Reck59135872010-11-02 12:39:01 -07002030 { MaybeObject* maybe_obj =
Steve Block44f0eee2011-05-26 01:26:41 +01002031 Oddball::cast(null_value())->Initialize("null",
2032 Smi::FromInt(0),
2033 Oddball::kNull);
John Reck59135872010-11-02 12:39:01 -07002034 if (!maybe_obj->ToObject(&obj)) return false;
2035 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002036
Steve Block44f0eee2011-05-26 01:26:41 +01002037 { MaybeObject* maybe_obj = CreateOddball("true",
2038 Smi::FromInt(1),
2039 Oddball::kTrue);
John Reck59135872010-11-02 12:39:01 -07002040 if (!maybe_obj->ToObject(&obj)) return false;
2041 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002042 set_true_value(obj);
2043
Steve Block44f0eee2011-05-26 01:26:41 +01002044 { MaybeObject* maybe_obj = CreateOddball("false",
2045 Smi::FromInt(0),
2046 Oddball::kFalse);
John Reck59135872010-11-02 12:39:01 -07002047 if (!maybe_obj->ToObject(&obj)) return false;
2048 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002049 set_false_value(obj);
2050
Steve Block44f0eee2011-05-26 01:26:41 +01002051 { MaybeObject* maybe_obj = CreateOddball("hole",
2052 Smi::FromInt(-1),
2053 Oddball::kTheHole);
John Reck59135872010-11-02 12:39:01 -07002054 if (!maybe_obj->ToObject(&obj)) return false;
2055 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002056 set_the_hole_value(obj);
2057
Ben Murdoch086aeea2011-05-13 15:57:08 +01002058 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
Steve Block44f0eee2011-05-26 01:26:41 +01002059 Smi::FromInt(-4),
2060 Oddball::kArgumentMarker);
Ben Murdoch086aeea2011-05-13 15:57:08 +01002061 if (!maybe_obj->ToObject(&obj)) return false;
2062 }
2063 set_arguments_marker(obj);
2064
Steve Block44f0eee2011-05-26 01:26:41 +01002065 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2066 Smi::FromInt(-2),
2067 Oddball::kOther);
John Reck59135872010-11-02 12:39:01 -07002068 if (!maybe_obj->ToObject(&obj)) return false;
2069 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002070 set_no_interceptor_result_sentinel(obj);
2071
Steve Block44f0eee2011-05-26 01:26:41 +01002072 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2073 Smi::FromInt(-3),
2074 Oddball::kOther);
John Reck59135872010-11-02 12:39:01 -07002075 if (!maybe_obj->ToObject(&obj)) return false;
2076 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002077 set_termination_exception(obj);
2078
2079 // Allocate the empty string.
John Reck59135872010-11-02 12:39:01 -07002080 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2081 if (!maybe_obj->ToObject(&obj)) return false;
2082 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002083 set_empty_string(String::cast(obj));
2084
2085 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
John Reck59135872010-11-02 12:39:01 -07002086 { MaybeObject* maybe_obj =
2087 LookupAsciiSymbol(constant_symbol_table[i].contents);
2088 if (!maybe_obj->ToObject(&obj)) return false;
2089 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002090 roots_[constant_symbol_table[i].index] = String::cast(obj);
2091 }
2092
2093 // Allocate the hidden symbol which is used to identify the hidden properties
2094 // in JSObjects. The hash code has a special value so that it will not match
2095 // the empty string when searching for the property. It cannot be part of the
2096 // loop above because it needs to be allocated manually with the special
2097 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2098 // that it will always be at the first entry in property descriptors.
John Reck59135872010-11-02 12:39:01 -07002099 { MaybeObject* maybe_obj =
2100 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2101 if (!maybe_obj->ToObject(&obj)) return false;
2102 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002103 hidden_symbol_ = String::cast(obj);
2104
2105 // Allocate the proxy for __proto__.
John Reck59135872010-11-02 12:39:01 -07002106 { MaybeObject* maybe_obj =
2107 AllocateProxy((Address) &Accessors::ObjectPrototype);
2108 if (!maybe_obj->ToObject(&obj)) return false;
2109 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002110 set_prototype_accessors(Proxy::cast(obj));
2111
2112 // Allocate the code_stubs dictionary. The initial size is set to avoid
2113 // expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002114 { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
2115 if (!maybe_obj->ToObject(&obj)) return false;
2116 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002117 set_code_stubs(NumberDictionary::cast(obj));
2118
2119 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2120 // is set to avoid expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002121 { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
2122 if (!maybe_obj->ToObject(&obj)) return false;
2123 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002124 set_non_monomorphic_cache(NumberDictionary::cast(obj));
2125
Kristian Monsen25f61362010-05-21 11:50:48 +01002126 set_instanceof_cache_function(Smi::FromInt(0));
2127 set_instanceof_cache_map(Smi::FromInt(0));
2128 set_instanceof_cache_answer(Smi::FromInt(0));
2129
Steve Blocka7e24c12009-10-30 11:49:00 +00002130 CreateFixedStubs();
2131
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002132 // Allocate the dictionary of intrinsic function names.
John Reck59135872010-11-02 12:39:01 -07002133 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2134 if (!maybe_obj->ToObject(&obj)) return false;
2135 }
Steve Block44f0eee2011-05-26 01:26:41 +01002136 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2137 obj);
John Reck59135872010-11-02 12:39:01 -07002138 if (!maybe_obj->ToObject(&obj)) return false;
2139 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002140 set_intrinsic_function_names(StringDictionary::cast(obj));
2141
Leon Clarkee46be812010-01-19 14:06:41 +00002142 if (InitializeNumberStringCache()->IsFailure()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002143
Steve Block6ded16b2010-05-10 14:33:55 +01002144 // Allocate cache for single character ASCII strings.
John Reck59135872010-11-02 12:39:01 -07002145 { MaybeObject* maybe_obj =
2146 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2147 if (!maybe_obj->ToObject(&obj)) return false;
2148 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002149 set_single_character_string_cache(FixedArray::cast(obj));
2150
2151 // Allocate cache for external strings pointing to native source code.
John Reck59135872010-11-02 12:39:01 -07002152 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2153 if (!maybe_obj->ToObject(&obj)) return false;
2154 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002155 set_natives_source_cache(FixedArray::cast(obj));
2156
Steve Block44f0eee2011-05-26 01:26:41 +01002157 // Handling of script id generation is in FACTORY->NewScript.
Steve Blocka7e24c12009-10-30 11:49:00 +00002158 set_last_script_id(undefined_value());
2159
2160 // Initialize keyed lookup cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002161 isolate_->keyed_lookup_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002162
2163 // Initialize context slot cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002164 isolate_->context_slot_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002165
2166 // Initialize descriptor cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002167 isolate_->descriptor_lookup_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002168
2169 // Initialize compilation cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002170 isolate_->compilation_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002171
2172 return true;
2173}
2174
2175
John Reck59135872010-11-02 12:39:01 -07002176MaybeObject* Heap::InitializeNumberStringCache() {
Leon Clarkee46be812010-01-19 14:06:41 +00002177 // Compute the size of the number string cache based on the max heap size.
2178 // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
2179 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
2180 int number_string_cache_size = max_semispace_size_ / 512;
2181 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
John Reck59135872010-11-02 12:39:01 -07002182 Object* obj;
2183 MaybeObject* maybe_obj =
2184 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2185 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2186 return maybe_obj;
Leon Clarkee46be812010-01-19 14:06:41 +00002187}
2188
2189
2190void Heap::FlushNumberStringCache() {
2191 // Flush the number to string cache.
2192 int len = number_string_cache()->length();
2193 for (int i = 0; i < len; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01002194 number_string_cache()->set_undefined(this, i);
Leon Clarkee46be812010-01-19 14:06:41 +00002195 }
2196}
2197
2198
Steve Blocka7e24c12009-10-30 11:49:00 +00002199static inline int double_get_hash(double d) {
2200 DoubleRepresentation rep(d);
Leon Clarkee46be812010-01-19 14:06:41 +00002201 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
Steve Blocka7e24c12009-10-30 11:49:00 +00002202}
2203
2204
2205static inline int smi_get_hash(Smi* smi) {
Leon Clarkee46be812010-01-19 14:06:41 +00002206 return smi->value();
Steve Blocka7e24c12009-10-30 11:49:00 +00002207}
2208
2209
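// The number-string cache is a flat FixedArray of (number, string) pairs:
// entry i keeps its key at index 2 * i and its value at index 2 * i + 1.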
Steve Blocka7e24c12009-10-30 11:49:00 +00002210Object* Heap::GetNumberStringCache(Object* number) {
2211 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002212 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002213 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002214 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002215 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002216 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002217 }
2218 Object* key = number_string_cache()->get(hash * 2);
2219 if (key == number) {
2220 return String::cast(number_string_cache()->get(hash * 2 + 1));
2221 } else if (key->IsHeapNumber() &&
2222 number->IsHeapNumber() &&
2223 key->Number() == number->Number()) {
2224 return String::cast(number_string_cache()->get(hash * 2 + 1));
2225 }
2226 return undefined_value();
2227}
2228
2229
2230void Heap::SetNumberStringCache(Object* number, String* string) {
2231 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002232 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002233 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002234 hash = smi_get_hash(Smi::cast(number)) & mask;
Leon Clarke4515c472010-02-03 11:58:03 +00002235 number_string_cache()->set(hash * 2, Smi::cast(number));
Steve Blocka7e24c12009-10-30 11:49:00 +00002236 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002237 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002238 number_string_cache()->set(hash * 2, number);
2239 }
2240 number_string_cache()->set(hash * 2 + 1, string);
2241}
2242
2243
John Reck59135872010-11-02 12:39:01 -07002244MaybeObject* Heap::NumberToString(Object* number,
2245 bool check_number_string_cache) {
Steve Block44f0eee2011-05-26 01:26:41 +01002246 isolate_->counters()->number_to_string_runtime()->Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002247 if (check_number_string_cache) {
2248 Object* cached = GetNumberStringCache(number);
2249 if (cached != undefined_value()) {
2250 return cached;
2251 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002252 }
2253
2254 char arr[100];
2255 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2256 const char* str;
2257 if (number->IsSmi()) {
2258 int num = Smi::cast(number)->value();
2259 str = IntToCString(num, buffer);
2260 } else {
2261 double num = HeapNumber::cast(number)->value();
2262 str = DoubleToCString(num, buffer);
2263 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002264
John Reck59135872010-11-02 12:39:01 -07002265 Object* js_string;
2266 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2267 if (maybe_js_string->ToObject(&js_string)) {
2268 SetNumberStringCache(number, String::cast(js_string));
Steve Blocka7e24c12009-10-30 11:49:00 +00002269 }
John Reck59135872010-11-02 12:39:01 -07002270 return maybe_js_string;
Steve Blocka7e24c12009-10-30 11:49:00 +00002271}
2272
2273
Steve Block3ce2e202009-11-05 08:53:23 +00002274Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2275 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2276}
2277
2278
2279Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2280 ExternalArrayType array_type) {
2281 switch (array_type) {
2282 case kExternalByteArray:
2283 return kExternalByteArrayMapRootIndex;
2284 case kExternalUnsignedByteArray:
2285 return kExternalUnsignedByteArrayMapRootIndex;
2286 case kExternalShortArray:
2287 return kExternalShortArrayMapRootIndex;
2288 case kExternalUnsignedShortArray:
2289 return kExternalUnsignedShortArrayMapRootIndex;
2290 case kExternalIntArray:
2291 return kExternalIntArrayMapRootIndex;
2292 case kExternalUnsignedIntArray:
2293 return kExternalUnsignedIntArrayMapRootIndex;
2294 case kExternalFloatArray:
2295 return kExternalFloatArrayMapRootIndex;
Steve Block44f0eee2011-05-26 01:26:41 +01002296 case kExternalPixelArray:
2297 return kExternalPixelArrayMapRootIndex;
Steve Block3ce2e202009-11-05 08:53:23 +00002298 default:
2299 UNREACHABLE();
2300 return kUndefinedValueRootIndex;
2301 }
2302}
2303
2304
John Reck59135872010-11-02 12:39:01 -07002305MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01002306 // We need to distinguish the minus zero value and this cannot be
2307 // done after conversion to int. Doing this by comparing bit
2308 // patterns is faster than using fpclassify() et al.
2309 static const DoubleRepresentation minus_zero(-0.0);
2310
2311 DoubleRepresentation rep(value);
2312 if (rep.bits == minus_zero.bits) {
2313 return AllocateHeapNumber(-0.0, pretenure);
2314 }
2315
2316 int int_value = FastD2I(value);
2317 if (value == int_value && Smi::IsValid(int_value)) {
2318 return Smi::FromInt(int_value);
2319 }
2320
2321 // Materialize the value in the heap.
2322 return AllocateHeapNumber(value, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002323}
2324
2325
John Reck59135872010-11-02 12:39:01 -07002326MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002327 // Statically ensure that it is safe to allocate proxies in paged spaces.
2328 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
2329 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002330 Object* result;
2331 { MaybeObject* maybe_result = Allocate(proxy_map(), space);
2332 if (!maybe_result->ToObject(&result)) return maybe_result;
2333 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002334
2335 Proxy::cast(result)->set_proxy(proxy);
2336 return result;
2337}
2338
2339
John Reck59135872010-11-02 12:39:01 -07002340MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2341 Object* result;
2342 { MaybeObject* maybe_result =
2343 Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2344 if (!maybe_result->ToObject(&result)) return maybe_result;
2345 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002346
2347 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
2348 share->set_name(name);
Steve Block44f0eee2011-05-26 01:26:41 +01002349 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
Steve Blocka7e24c12009-10-30 11:49:00 +00002350 share->set_code(illegal);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002351 share->set_scope_info(SerializedScopeInfo::Empty());
Steve Block44f0eee2011-05-26 01:26:41 +01002352 Code* construct_stub = isolate_->builtins()->builtin(
2353 Builtins::kJSConstructStubGeneric);
Steve Blocka7e24c12009-10-30 11:49:00 +00002354 share->set_construct_stub(construct_stub);
2355 share->set_expected_nof_properties(0);
2356 share->set_length(0);
2357 share->set_formal_parameter_count(0);
2358 share->set_instance_class_name(Object_symbol());
2359 share->set_function_data(undefined_value());
2360 share->set_script(undefined_value());
2361 share->set_start_position_and_type(0);
2362 share->set_debug_info(undefined_value());
2363 share->set_inferred_name(empty_string());
2364 share->set_compiler_hints(0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002365 share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002366 share->set_initial_map(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002367 share->set_this_property_assignments_count(0);
2368 share->set_this_property_assignments(undefined_value());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002369 share->set_opt_count(0);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002370 share->set_num_literals(0);
2371 share->set_end_position(0);
2372 share->set_function_token_position(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002373 return result;
2374}
2375
2376
Steve Block1e0659c2011-05-24 12:43:12 +01002377MaybeObject* Heap::AllocateJSMessageObject(String* type,
2378 JSArray* arguments,
2379 int start_position,
2380 int end_position,
2381 Object* script,
2382 Object* stack_trace,
2383 Object* stack_frames) {
2384 Object* result;
2385 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
2386 if (!maybe_result->ToObject(&result)) return maybe_result;
2387 }
2388 JSMessageObject* message = JSMessageObject::cast(result);
2389 message->set_properties(Heap::empty_fixed_array());
2390 message->set_elements(Heap::empty_fixed_array());
2391 message->set_type(type);
2392 message->set_arguments(arguments);
2393 message->set_start_position(start_position);
2394 message->set_end_position(end_position);
2395 message->set_script(script);
2396 message->set_stack_trace(stack_trace);
2397 message->set_stack_frames(stack_frames);
2398 return result;
2399}
2400
2401
2402
Steve Blockd0582a62009-12-15 09:54:21 +00002403// Returns true for a character in a range. Both limits are inclusive.
2404static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2405  // This makes use of the unsigned wraparound.
2406 return character - from <= to - from;
2407}
2408
2409
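// Returns the existing two-character symbol for (c1, c2) if the symbol table
// already contains it; otherwise allocates a fresh length-2 sequential string
// (ASCII when both characters fit, two-byte otherwise). Numeric strings are
// never looked up because they use a different hash algorithm.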
John Reck59135872010-11-02 12:39:01 -07002410MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
Steve Block44f0eee2011-05-26 01:26:41 +01002411 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07002412 uint32_t c1,
2413 uint32_t c2) {
Steve Blockd0582a62009-12-15 09:54:21 +00002414 String* symbol;
2415 // Numeric strings have a different hash algorithm not known by
2416 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2417 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
Steve Block44f0eee2011-05-26 01:26:41 +01002418 heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
Steve Blockd0582a62009-12-15 09:54:21 +00002419 return symbol;
2420    // Now that we know the length is 2, we might as well make use of that
2421    // fact when building the new string.
2422 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2423 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
John Reck59135872010-11-02 12:39:01 -07002424 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002425 { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
John Reck59135872010-11-02 12:39:01 -07002426 if (!maybe_result->ToObject(&result)) return maybe_result;
2427 }
Steve Blockd0582a62009-12-15 09:54:21 +00002428 char* dest = SeqAsciiString::cast(result)->GetChars();
2429 dest[0] = c1;
2430 dest[1] = c2;
2431 return result;
2432 } else {
John Reck59135872010-11-02 12:39:01 -07002433 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002434 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
John Reck59135872010-11-02 12:39:01 -07002435 if (!maybe_result->ToObject(&result)) return maybe_result;
2436 }
Steve Blockd0582a62009-12-15 09:54:21 +00002437 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2438 dest[0] = c1;
2439 dest[1] = c2;
2440 return result;
2441 }
2442}
2443
2444
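// Concatenates two strings. Degenerate cases (an empty operand, a total
// length of 2, or a result shorter than String::kMinNonFlatLength) produce a
// flat string; otherwise a ConsString pointing at both halves is allocated,
// using the ASCII cons map when the characters allow it.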
John Reck59135872010-11-02 12:39:01 -07002445MaybeObject* Heap::AllocateConsString(String* first, String* second) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002446 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002447 if (first_length == 0) {
2448 return second;
2449 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002450
2451 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002452 if (second_length == 0) {
2453 return first;
2454 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002455
2456 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002457
2458  // Optimization for 2-character strings often used as keys in a decompression
2459  // dictionary. Check whether we already have the string in the symbol
2460  // table to prevent creation of many unnecessary strings.
2461 if (length == 2) {
2462 unsigned c1 = first->Get(0);
2463 unsigned c2 = second->Get(0);
Steve Block44f0eee2011-05-26 01:26:41 +01002464 return MakeOrFindTwoCharacterString(this, c1, c2);
Steve Blockd0582a62009-12-15 09:54:21 +00002465 }
2466
Steve Block6ded16b2010-05-10 14:33:55 +01002467 bool first_is_ascii = first->IsAsciiRepresentation();
2468 bool second_is_ascii = second->IsAsciiRepresentation();
2469 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002470
2471 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002472 // of the new cons string is too large.
2473 if (length > String::kMaxLength || length < 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01002474 isolate()->context()->mark_out_of_memory();
Steve Blocka7e24c12009-10-30 11:49:00 +00002475 return Failure::OutOfMemoryException();
2476 }
2477
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002478 bool is_ascii_data_in_two_byte_string = false;
2479 if (!is_ascii) {
2480 // At least one of the strings uses two-byte representation so we
2481 // can't use the fast case code for short ascii strings below, but
2482 // we can try to save memory if all chars actually fit in ascii.
2483 is_ascii_data_in_two_byte_string =
2484 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2485 if (is_ascii_data_in_two_byte_string) {
Steve Block44f0eee2011-05-26 01:26:41 +01002486 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002487 }
2488 }
2489
Steve Blocka7e24c12009-10-30 11:49:00 +00002490 // If the resulting string is small make a flat string.
2491 if (length < String::kMinNonFlatLength) {
2492 ASSERT(first->IsFlat());
2493 ASSERT(second->IsFlat());
2494 if (is_ascii) {
John Reck59135872010-11-02 12:39:01 -07002495 Object* result;
2496 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2497 if (!maybe_result->ToObject(&result)) return maybe_result;
2498 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002499 // Copy the characters into the new object.
2500 char* dest = SeqAsciiString::cast(result)->GetChars();
2501 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002502 const char* src;
2503 if (first->IsExternalString()) {
2504 src = ExternalAsciiString::cast(first)->resource()->data();
2505 } else {
2506 src = SeqAsciiString::cast(first)->GetChars();
2507 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002508 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2509 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002510 if (second->IsExternalString()) {
2511 src = ExternalAsciiString::cast(second)->resource()->data();
2512 } else {
2513 src = SeqAsciiString::cast(second)->GetChars();
2514 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002515 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2516 return result;
2517 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002518 if (is_ascii_data_in_two_byte_string) {
John Reck59135872010-11-02 12:39:01 -07002519 Object* result;
2520 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2521 if (!maybe_result->ToObject(&result)) return maybe_result;
2522 }
Steve Block6ded16b2010-05-10 14:33:55 +01002523 // Copy the characters into the new object.
2524 char* dest = SeqAsciiString::cast(result)->GetChars();
2525 String::WriteToFlat(first, dest, 0, first_length);
2526 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block44f0eee2011-05-26 01:26:41 +01002527 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002528 return result;
2529 }
2530
John Reck59135872010-11-02 12:39:01 -07002531 Object* result;
2532 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2533 if (!maybe_result->ToObject(&result)) return maybe_result;
2534 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002535 // Copy the characters into the new object.
2536 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2537 String::WriteToFlat(first, dest, 0, first_length);
2538 String::WriteToFlat(second, dest + first_length, 0, second_length);
2539 return result;
2540 }
2541 }
2542
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002543 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2544 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002545
John Reck59135872010-11-02 12:39:01 -07002546 Object* result;
2547 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2548 if (!maybe_result->ToObject(&result)) return maybe_result;
2549 }
Leon Clarke4515c472010-02-03 11:58:03 +00002550
2551 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002552 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002553 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002554 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002555 cons_string->set_hash_field(String::kEmptyHashField);
2556 cons_string->set_first(first, mode);
2557 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002558 return result;
2559}
2560
2561
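// Copies the characters [start, end) of 'buffer' into a fresh sequential
// string. Lengths 1 and 2 are handled by the single-character lookup and the
// two-character symbol lookup above; longer substrings flatten the buffer
// first to speed up the copy.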
John Reck59135872010-11-02 12:39:01 -07002562MaybeObject* Heap::AllocateSubString(String* buffer,
Steve Blocka7e24c12009-10-30 11:49:00 +00002563 int start,
Steve Block6ded16b2010-05-10 14:33:55 +01002564 int end,
2565 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002566 int length = end - start;
2567
2568 if (length == 1) {
Steve Block44f0eee2011-05-26 01:26:41 +01002569 return LookupSingleCharacterStringFromCode(buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002570 } else if (length == 2) {
2571    // Optimization for 2-character strings often used as keys in a decompression
2572    // dictionary. Check whether we already have the string in the symbol
2573    // table to prevent creation of many unnecessary strings.
2574 unsigned c1 = buffer->Get(start);
2575 unsigned c2 = buffer->Get(start + 1);
Steve Block44f0eee2011-05-26 01:26:41 +01002576 return MakeOrFindTwoCharacterString(this, c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002577 }
2578
2579 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002580 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002581
John Reck59135872010-11-02 12:39:01 -07002582 Object* result;
2583 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
2584                     ? AllocateRawAsciiString(length, pretenure)
2585 : AllocateRawTwoByteString(length, pretenure);
2586 if (!maybe_result->ToObject(&result)) return maybe_result;
2587 }
Steve Blockd0582a62009-12-15 09:54:21 +00002588 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002589 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002590 if (buffer->IsAsciiRepresentation()) {
2591 ASSERT(string_result->IsAsciiRepresentation());
2592 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2593 String::WriteToFlat(buffer, dest, start, end);
2594 } else {
2595 ASSERT(string_result->IsTwoByteRepresentation());
2596 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2597 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002598 }
Steve Blockd0582a62009-12-15 09:54:21 +00002599
Steve Blocka7e24c12009-10-30 11:49:00 +00002600 return result;
2601}
2602
2603
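// Wraps an externally managed ASCII resource in an ExternalAsciiString. The
// characters stay in the resource; only the string header is allocated on the
// V8 heap.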
John Reck59135872010-11-02 12:39:01 -07002604MaybeObject* Heap::AllocateExternalStringFromAscii(
Steve Blocka7e24c12009-10-30 11:49:00 +00002605 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002606 size_t length = resource->length();
2607 if (length > static_cast<size_t>(String::kMaxLength)) {
Steve Block44f0eee2011-05-26 01:26:41 +01002608 isolate()->context()->mark_out_of_memory();
Steve Blockd0582a62009-12-15 09:54:21 +00002609 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002610 }
2611
Steve Blockd0582a62009-12-15 09:54:21 +00002612 Map* map = external_ascii_string_map();
John Reck59135872010-11-02 12:39:01 -07002613 Object* result;
2614 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2615 if (!maybe_result->ToObject(&result)) return maybe_result;
2616 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002617
2618 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002619 external_string->set_length(static_cast<int>(length));
2620 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002621 external_string->set_resource(resource);
2622
2623 return result;
2624}
2625
2626
John Reck59135872010-11-02 12:39:01 -07002627MaybeObject* Heap::AllocateExternalStringFromTwoByte(
Steve Blocka7e24c12009-10-30 11:49:00 +00002628 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002629 size_t length = resource->length();
2630 if (length > static_cast<size_t>(String::kMaxLength)) {
Steve Block44f0eee2011-05-26 01:26:41 +01002631 isolate()->context()->mark_out_of_memory();
Steve Blockd0582a62009-12-15 09:54:21 +00002632 return Failure::OutOfMemoryException();
2633 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002634
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002635 // For small strings we check whether the resource contains only
Steve Block9fac8402011-05-12 15:51:54 +01002636  // ASCII characters. If so, we use a different string map.
2637 static const size_t kAsciiCheckLengthLimit = 32;
2638 bool is_ascii = length <= kAsciiCheckLengthLimit &&
2639 String::IsAscii(resource->data(), static_cast<int>(length));
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002640 Map* map = is_ascii ?
Steve Block44f0eee2011-05-26 01:26:41 +01002641 external_string_with_ascii_data_map() : external_string_map();
John Reck59135872010-11-02 12:39:01 -07002642 Object* result;
2643 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2644 if (!maybe_result->ToObject(&result)) return maybe_result;
2645 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002646
2647 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002648 external_string->set_length(static_cast<int>(length));
2649 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002650 external_string->set_resource(resource);
2651
2652 return result;
2653}
2654
2655
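// Returns a one-character string for 'code'. ASCII codes are cached in
// single_character_string_cache() and interned via the symbol table; other
// codes get a fresh two-byte string of length one.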
John Reck59135872010-11-02 12:39:01 -07002656MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002657 if (code <= String::kMaxAsciiCharCode) {
Steve Block44f0eee2011-05-26 01:26:41 +01002658 Object* value = single_character_string_cache()->get(code);
2659 if (value != undefined_value()) return value;
Steve Blocka7e24c12009-10-30 11:49:00 +00002660
2661 char buffer[1];
2662 buffer[0] = static_cast<char>(code);
John Reck59135872010-11-02 12:39:01 -07002663 Object* result;
2664 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00002665
John Reck59135872010-11-02 12:39:01 -07002666 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block44f0eee2011-05-26 01:26:41 +01002667 single_character_string_cache()->set(code, result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002668 return result;
2669 }
2670
John Reck59135872010-11-02 12:39:01 -07002671 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002672 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
John Reck59135872010-11-02 12:39:01 -07002673 if (!maybe_result->ToObject(&result)) return maybe_result;
2674 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002675 String* answer = String::cast(result);
2676 answer->Set(0, code);
2677 return answer;
2678}
2679
2680
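// Allocates an uninitialized ByteArray. Tenured arrays go to old data space
// (or large object space when they exceed the paged-space limit); untenured
// requests fall through to the NEW_SPACE variant below.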
John Reck59135872010-11-02 12:39:01 -07002681MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002682 if (length < 0 || length > ByteArray::kMaxLength) {
2683 return Failure::OutOfMemoryException();
2684 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002685 if (pretenure == NOT_TENURED) {
2686 return AllocateByteArray(length);
2687 }
2688 int size = ByteArray::SizeFor(length);
John Reck59135872010-11-02 12:39:01 -07002689 Object* result;
2690 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2691 ? old_data_space_->AllocateRaw(size)
2692 : lo_space_->AllocateRaw(size);
2693 if (!maybe_result->ToObject(&result)) return maybe_result;
2694 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002695
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002696 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2697 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002698 return result;
2699}
2700
2701
John Reck59135872010-11-02 12:39:01 -07002702MaybeObject* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002703 if (length < 0 || length > ByteArray::kMaxLength) {
2704 return Failure::OutOfMemoryException();
2705 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002706 int size = ByteArray::SizeFor(length);
2707 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002708 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002709 Object* result;
2710 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2711 if (!maybe_result->ToObject(&result)) return maybe_result;
2712 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002713
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002714 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2715 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002716 return result;
2717}
2718
2719
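// Overwrites the region [addr, addr + size) with a filler object so that the
// heap remains iterable: a one- or two-pointer filler for the small cases and
// a byte array for anything larger.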
2720void Heap::CreateFillerObjectAt(Address addr, int size) {
2721 if (size == 0) return;
2722 HeapObject* filler = HeapObject::FromAddress(addr);
2723 if (size == kPointerSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01002724 filler->set_map(one_pointer_filler_map());
2725 } else if (size == 2 * kPointerSize) {
2726 filler->set_map(two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002727 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002728 filler->set_map(byte_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002729 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2730 }
2731}
2732
2733
John Reck59135872010-11-02 12:39:01 -07002734MaybeObject* Heap::AllocateExternalArray(int length,
2735 ExternalArrayType array_type,
2736 void* external_pointer,
2737 PretenureFlag pretenure) {
Steve Block3ce2e202009-11-05 08:53:23 +00002738 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002739 Object* result;
2740 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
2741 space,
2742 OLD_DATA_SPACE);
2743 if (!maybe_result->ToObject(&result)) return maybe_result;
2744 }
Steve Block3ce2e202009-11-05 08:53:23 +00002745
2746 reinterpret_cast<ExternalArray*>(result)->set_map(
2747 MapForExternalArrayType(array_type));
2748 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2749 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2750 external_pointer);
2751
2752 return result;
2753}
2754
2755
John Reck59135872010-11-02 12:39:01 -07002756MaybeObject* Heap::CreateCode(const CodeDesc& desc,
2757 Code::Flags flags,
Steve Block44f0eee2011-05-26 01:26:41 +01002758 Handle<Object> self_reference,
2759 bool immovable) {
Leon Clarkeac952652010-07-15 11:15:24 +01002760 // Allocate ByteArray before the Code object, so that we do not risk
2761  // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002762 Object* reloc_info;
2763 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2764 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
2765 }
Leon Clarkeac952652010-07-15 11:15:24 +01002766
Steve Block44f0eee2011-05-26 01:26:41 +01002767 // Compute size.
Leon Clarkeac952652010-07-15 11:15:24 +01002768 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002769 int obj_size = Code::SizeFor(body_size);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002770 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
John Reck59135872010-11-02 12:39:01 -07002771 MaybeObject* maybe_result;
Steve Block44f0eee2011-05-26 01:26:41 +01002772 // Large code objects and code objects which should stay at a fixed address
2773 // are allocated in large object space.
2774 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
John Reck59135872010-11-02 12:39:01 -07002775 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002776 } else {
John Reck59135872010-11-02 12:39:01 -07002777 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002778 }
2779
John Reck59135872010-11-02 12:39:01 -07002780 Object* result;
2781 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002782
2783 // Initialize the object
2784 HeapObject::cast(result)->set_map(code_map());
2785 Code* code = Code::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002786 ASSERT(!isolate_->code_range()->exists() ||
2787 isolate_->code_range()->contains(code->address()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002788 code->set_instruction_size(desc.instr_size);
Leon Clarkeac952652010-07-15 11:15:24 +01002789 code->set_relocation_info(ByteArray::cast(reloc_info));
Steve Blocka7e24c12009-10-30 11:49:00 +00002790 code->set_flags(flags);
Ben Murdochb8e0da22011-05-16 14:20:40 +01002791 if (code->is_call_stub() || code->is_keyed_call_stub()) {
2792 code->set_check_type(RECEIVER_MAP_CHECK);
2793 }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002794 code->set_deoptimization_data(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00002795  // Allow self references to the created code object by patching the handle to
2796 // point to the newly allocated Code object.
2797 if (!self_reference.is_null()) {
2798 *(self_reference.location()) = code;
2799 }
2800 // Migrate generated code.
2801 // The generated code can contain Object** values (typically from handles)
2802 // that are dereferenced during the copy to point directly to the actual heap
2803 // objects. These pointers can include references to the code object itself,
2804 // through the self_reference parameter.
2805 code->CopyFrom(desc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002806
2807#ifdef DEBUG
2808 code->Verify();
2809#endif
2810 return code;
2811}
2812
2813
John Reck59135872010-11-02 12:39:01 -07002814MaybeObject* Heap::CopyCode(Code* code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002815 // Allocate an object the same size as the code object.
2816 int obj_size = code->Size();
John Reck59135872010-11-02 12:39:01 -07002817 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002818 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002819 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002820 } else {
John Reck59135872010-11-02 12:39:01 -07002821 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002822 }
2823
John Reck59135872010-11-02 12:39:01 -07002824 Object* result;
2825 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002826
2827 // Copy code object.
2828 Address old_addr = code->address();
2829 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002830 CopyBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002831 // Relocate the copy.
2832 Code* new_code = Code::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002833 ASSERT(!isolate_->code_range()->exists() ||
2834 isolate_->code_range()->contains(code->address()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002835 new_code->Relocate(new_addr - old_addr);
2836 return new_code;
2837}
2838
2839
John Reck59135872010-11-02 12:39:01 -07002840MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Leon Clarkeac952652010-07-15 11:15:24 +01002841 // Allocate ByteArray before the Code object, so that we do not risk
2842  // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002843 Object* reloc_info_array;
2844 { MaybeObject* maybe_reloc_info_array =
2845 AllocateByteArray(reloc_info.length(), TENURED);
2846 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
2847 return maybe_reloc_info_array;
2848 }
2849 }
Leon Clarkeac952652010-07-15 11:15:24 +01002850
2851 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
Steve Block6ded16b2010-05-10 14:33:55 +01002852
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002853 int new_obj_size = Code::SizeFor(new_body_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002854
2855 Address old_addr = code->address();
2856
2857 size_t relocation_offset =
Leon Clarkeac952652010-07-15 11:15:24 +01002858 static_cast<size_t>(code->instruction_end() - old_addr);
Steve Block6ded16b2010-05-10 14:33:55 +01002859
John Reck59135872010-11-02 12:39:01 -07002860 MaybeObject* maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002861 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002862 maybe_result = lo_space_->AllocateRawCode(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002863 } else {
John Reck59135872010-11-02 12:39:01 -07002864 maybe_result = code_space_->AllocateRaw(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002865 }
2866
John Reck59135872010-11-02 12:39:01 -07002867 Object* result;
2868 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002869
2870 // Copy code object.
2871 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2872
2873 // Copy header and instructions.
2874 memcpy(new_addr, old_addr, relocation_offset);
2875
Steve Block6ded16b2010-05-10 14:33:55 +01002876 Code* new_code = Code::cast(result);
Leon Clarkeac952652010-07-15 11:15:24 +01002877 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
Steve Block6ded16b2010-05-10 14:33:55 +01002878
Leon Clarkeac952652010-07-15 11:15:24 +01002879 // Copy patched rinfo.
2880 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
Steve Block6ded16b2010-05-10 14:33:55 +01002881
2882 // Relocate the copy.
Steve Block44f0eee2011-05-26 01:26:41 +01002883 ASSERT(!isolate_->code_range()->exists() ||
2884 isolate_->code_range()->contains(code->address()));
Steve Block6ded16b2010-05-10 14:33:55 +01002885 new_code->Relocate(new_addr - old_addr);
2886
2887#ifdef DEBUG
2888 code->Verify();
2889#endif
2890 return new_code;
2891}
2892
2893
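// Allocates an uninitialized object described by 'map'. If new space is full,
// the allocation is retried in the target space derived from the instance
// type; the resulting memory is tagged with 'map' before being returned.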
John Reck59135872010-11-02 12:39:01 -07002894MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002895 ASSERT(gc_state_ == NOT_IN_GC);
2896 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00002897 // If allocation failures are disallowed, we may allocate in a different
2898 // space when new space is full and the object is not a large object.
2899 AllocationSpace retry_space =
2900 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
John Reck59135872010-11-02 12:39:01 -07002901 Object* result;
2902 { MaybeObject* maybe_result =
2903 AllocateRaw(map->instance_size(), space, retry_space);
2904 if (!maybe_result->ToObject(&result)) return maybe_result;
2905 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002906 HeapObject::cast(result)->set_map(map);
Steve Block3ce2e202009-11-05 08:53:23 +00002907#ifdef ENABLE_LOGGING_AND_PROFILING
Steve Block44f0eee2011-05-26 01:26:41 +01002908 isolate_->producer_heap_profile()->RecordJSObjectAllocation(result);
Steve Block3ce2e202009-11-05 08:53:23 +00002909#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002910 return result;
2911}
2912
2913
John Reck59135872010-11-02 12:39:01 -07002914MaybeObject* Heap::InitializeFunction(JSFunction* function,
2915 SharedFunctionInfo* shared,
2916 Object* prototype) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002917 ASSERT(!prototype->IsMap());
2918 function->initialize_properties();
2919 function->initialize_elements();
2920 function->set_shared(shared);
Iain Merrick75681382010-08-19 15:07:18 +01002921 function->set_code(shared->code());
Steve Blocka7e24c12009-10-30 11:49:00 +00002922 function->set_prototype_or_initial_map(prototype);
2923 function->set_context(undefined_value());
Leon Clarke4515c472010-02-03 11:58:03 +00002924 function->set_literals(empty_fixed_array());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002925 function->set_next_function_link(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002926 return function;
2927}
2928
2929
John Reck59135872010-11-02 12:39:01 -07002930MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002931 // Allocate the prototype. Make sure to use the object function
2932 // from the function's context, since the function can be from a
2933 // different context.
2934 JSFunction* object_function =
2935 function->context()->global_context()->object_function();
John Reck59135872010-11-02 12:39:01 -07002936 Object* prototype;
2937 { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
2938 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2939 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002940 // When creating the prototype for the function we must set its
2941 // constructor to the function.
John Reck59135872010-11-02 12:39:01 -07002942 Object* result;
2943 { MaybeObject* maybe_result =
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002944 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
2945 constructor_symbol(), function, DONT_ENUM);
John Reck59135872010-11-02 12:39:01 -07002946 if (!maybe_result->ToObject(&result)) return maybe_result;
2947 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002948 return prototype;
2949}
2950
2951
John Reck59135872010-11-02 12:39:01 -07002952MaybeObject* Heap::AllocateFunction(Map* function_map,
2953 SharedFunctionInfo* shared,
2954 Object* prototype,
2955 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002956 AllocationSpace space =
2957 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002958 Object* result;
2959 { MaybeObject* maybe_result = Allocate(function_map, space);
2960 if (!maybe_result->ToObject(&result)) return maybe_result;
2961 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002962 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2963}
2964
2965
John Reck59135872010-11-02 12:39:01 -07002966MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002967 // To get fast allocation and map sharing for arguments objects we
2968 // allocate them based on an arguments boilerplate.
2969
Steve Block44f0eee2011-05-26 01:26:41 +01002970 JSObject* boilerplate;
2971 int arguments_object_size;
2972 bool strict_mode_callee = callee->IsJSFunction() &&
2973 JSFunction::cast(callee)->shared()->strict_mode();
2974 if (strict_mode_callee) {
2975 boilerplate =
2976 isolate()->context()->global_context()->
2977 strict_mode_arguments_boilerplate();
2978 arguments_object_size = kArgumentsObjectSizeStrict;
2979 } else {
2980 boilerplate =
2981 isolate()->context()->global_context()->arguments_boilerplate();
2982 arguments_object_size = kArgumentsObjectSize;
2983 }
2984
Steve Blocka7e24c12009-10-30 11:49:00 +00002985 // This calls Copy directly rather than using Heap::AllocateRaw so we
2986 // duplicate the check here.
2987 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2988
Leon Clarkee46be812010-01-19 14:06:41 +00002989 // Check that the size of the boilerplate matches our
2990 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2991 // on the size being a known constant.
Steve Block44f0eee2011-05-26 01:26:41 +01002992 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
Leon Clarkee46be812010-01-19 14:06:41 +00002993
2994 // Do the allocation.
John Reck59135872010-11-02 12:39:01 -07002995 Object* result;
2996 { MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01002997 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
John Reck59135872010-11-02 12:39:01 -07002998 if (!maybe_result->ToObject(&result)) return maybe_result;
2999 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003000
3001 // Copy the content. The arguments boilerplate doesn't have any
3002 // fields that point to new space so it's safe to skip the write
3003 // barrier here.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003004 CopyBlock(HeapObject::cast(result)->address(),
3005 boilerplate->address(),
Steve Block44f0eee2011-05-26 01:26:41 +01003006 JSObject::kHeaderSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003007
Steve Block44f0eee2011-05-26 01:26:41 +01003008 // Set the length property.
3009 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
Steve Blocka7e24c12009-10-30 11:49:00 +00003010 Smi::FromInt(length),
3011 SKIP_WRITE_BARRIER);
Steve Block44f0eee2011-05-26 01:26:41 +01003012 // Set the callee property for non-strict mode arguments object only.
3013 if (!strict_mode_callee) {
3014 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3015 callee);
3016 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003017
3018 // Check the state of the object
3019 ASSERT(JSObject::cast(result)->HasFastProperties());
3020 ASSERT(JSObject::cast(result)->HasFastElements());
3021
3022 return result;
3023}
3024
3025
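// Returns true if the descriptor array contains two entries with the same
// key. Only adjacent entries are compared, so the array must be sorted first.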
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003026static bool HasDuplicates(DescriptorArray* descriptors) {
3027 int count = descriptors->number_of_descriptors();
3028 if (count > 1) {
3029 String* prev_key = descriptors->GetKey(0);
3030 for (int i = 1; i != count; i++) {
3031 String* current_key = descriptors->GetKey(i);
3032 if (prev_key == current_key) return true;
3033 prev_key = current_key;
3034 }
3035 }
3036 return false;
3037}
3038
3039
John Reck59135872010-11-02 12:39:01 -07003040MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003041 ASSERT(!fun->has_initial_map());
3042
3043 // First create a new map with the size and number of in-object properties
3044 // suggested by the function.
3045 int instance_size = fun->shared()->CalculateInstanceSize();
3046 int in_object_properties = fun->shared()->CalculateInObjectProperties();
John Reck59135872010-11-02 12:39:01 -07003047 Object* map_obj;
Steve Block44f0eee2011-05-26 01:26:41 +01003048 { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
John Reck59135872010-11-02 12:39:01 -07003049 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
3050 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003051
3052 // Fetch or allocate prototype.
3053 Object* prototype;
3054 if (fun->has_instance_prototype()) {
3055 prototype = fun->instance_prototype();
3056 } else {
John Reck59135872010-11-02 12:39:01 -07003057 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3058 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3059 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003060 }
3061 Map* map = Map::cast(map_obj);
3062 map->set_inobject_properties(in_object_properties);
3063 map->set_unused_property_fields(in_object_properties);
3064 map->set_prototype(prototype);
Steve Block8defd9f2010-07-08 12:39:36 +01003065 ASSERT(map->has_fast_elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003066
Andrei Popescu402d9372010-02-26 13:31:12 +00003067 // If the function has only simple this property assignments add
3068 // field descriptors for these to the initial map as the object
3069 // cannot be constructed without having these properties. Guard by
3070 // the inline_new flag so we only change the map if we generate a
3071 // specialized construct stub.
Steve Blocka7e24c12009-10-30 11:49:00 +00003072 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Andrei Popescu402d9372010-02-26 13:31:12 +00003073 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003074 int count = fun->shared()->this_property_assignments_count();
3075 if (count > in_object_properties) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003076 // Inline constructor can only handle inobject properties.
3077 fun->shared()->ForbidInlineConstructor();
3078 } else {
John Reck59135872010-11-02 12:39:01 -07003079 Object* descriptors_obj;
3080 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3081 if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
3082 return maybe_descriptors_obj;
3083 }
3084 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003085 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
3086 for (int i = 0; i < count; i++) {
3087 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3088 ASSERT(name->IsSymbol());
3089 FieldDescriptor field(name, i, NONE);
3090 field.SetEnumerationIndex(i);
3091 descriptors->Set(i, &field);
3092 }
3093 descriptors->SetNextEnumerationIndex(count);
3094 descriptors->SortUnchecked();
3095
3096 // The descriptors may contain duplicates because the compiler does not
3097 // guarantee the uniqueness of property names (it would have required
3098 // quadratic time). Once the descriptors are sorted we can check for
3099 // duplicates in linear time.
3100 if (HasDuplicates(descriptors)) {
3101 fun->shared()->ForbidInlineConstructor();
3102 } else {
3103 map->set_instance_descriptors(descriptors);
3104 map->set_pre_allocated_property_fields(count);
3105 map->set_unused_property_fields(in_object_properties - count);
3106 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003107 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003108 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003109
3110 fun->shared()->StartInobjectSlackTracking(map);
3111
Steve Blocka7e24c12009-10-30 11:49:00 +00003112 return map;
3113}
3114
3115
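// Fills in the body of a freshly allocated JSObject: the properties backing
// store, empty elements, and a filler value for the in-object fields
// (one_pointer_filler_map while in-object slack tracking is in progress,
// undefined otherwise).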
3116void Heap::InitializeJSObjectFromMap(JSObject* obj,
3117 FixedArray* properties,
3118 Map* map) {
3119 obj->set_properties(properties);
3120 obj->initialize_elements();
3121 // TODO(1240798): Initialize the object's body using valid initial values
3122 // according to the object's initial map. For example, if the map's
3123 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3124  // instance type is JS_ARRAY_TYPE, the length field should be initialized
3125  // to a number (e.g., Smi::FromInt(0)) and the elements initialized to a
3126  // fixed array (e.g., Heap::empty_fixed_array()). Currently, the object
3127  // verification code has to cope with (temporarily) invalid objects. See,
3128  // for example, JSArray::JSArrayVerify().
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003128 Object* filler;
3129 // We cannot always fill with one_pointer_filler_map because objects
3130 // created from API functions expect their internal fields to be initialized
3131 // with undefined_value.
3132 if (map->constructor()->IsJSFunction() &&
3133 JSFunction::cast(map->constructor())->shared()->
3134 IsInobjectSlackTrackingInProgress()) {
3135 // We might want to shrink the object later.
3136 ASSERT(obj->GetInternalFieldCount() == 0);
3137 filler = Heap::one_pointer_filler_map();
3138 } else {
3139 filler = Heap::undefined_value();
3140 }
3141 obj->InitializeBody(map->instance_size(), filler);
Steve Blocka7e24c12009-10-30 11:49:00 +00003142}
3143
3144
John Reck59135872010-11-02 12:39:01 -07003145MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003146 // JSFunctions should be allocated using AllocateFunction to be
3147 // properly initialized.
3148 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3149
Steve Block8defd9f2010-07-08 12:39:36 +01003150 // Both types of global objects should be allocated using
3151 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00003152 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3153 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3154
3155 // Allocate the backing storage for the properties.
3156 int prop_size =
3157 map->pre_allocated_property_fields() +
3158 map->unused_property_fields() -
3159 map->inobject_properties();
3160 ASSERT(prop_size >= 0);
John Reck59135872010-11-02 12:39:01 -07003161 Object* properties;
3162 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3163 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3164 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003165
3166 // Allocate the JSObject.
3167 AllocationSpace space =
3168 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3169 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
John Reck59135872010-11-02 12:39:01 -07003170 Object* obj;
3171 { MaybeObject* maybe_obj = Allocate(map, space);
3172 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3173 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003174
3175 // Initialize the JSObject.
3176 InitializeJSObjectFromMap(JSObject::cast(obj),
3177 FixedArray::cast(properties),
3178 map);
Steve Block8defd9f2010-07-08 12:39:36 +01003179 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003180 return obj;
3181}
3182
3183
John Reck59135872010-11-02 12:39:01 -07003184MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3185 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003186 // Allocate the initial map if absent.
3187 if (!constructor->has_initial_map()) {
John Reck59135872010-11-02 12:39:01 -07003188 Object* initial_map;
3189 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3190 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3191 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003192 constructor->set_initial_map(Map::cast(initial_map));
3193 Map::cast(initial_map)->set_constructor(constructor);
3194 }
3195  // Allocate the object based on the constructor's initial map.
John Reck59135872010-11-02 12:39:01 -07003196 MaybeObject* result =
Steve Blocka7e24c12009-10-30 11:49:00 +00003197 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
John Reck59135872010-11-02 12:39:01 -07003198#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00003199 // Make sure result is NOT a global object if valid.
John Reck59135872010-11-02 12:39:01 -07003200 Object* non_failure;
3201 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3202#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003203 return result;
3204}
3205
3206
John Reck59135872010-11-02 12:39:01 -07003207MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003208 ASSERT(constructor->has_initial_map());
3209 Map* map = constructor->initial_map();
3210
3211 // Make sure no field properties are described in the initial map.
3212 // This guarantees us that normalizing the properties does not
3213 // require us to change property values to JSGlobalPropertyCells.
3214 ASSERT(map->NextFreePropertyIndex() == 0);
3215
3216 // Make sure we don't have a ton of pre-allocated slots in the
3217 // global objects. They will be unused once we normalize the object.
3218 ASSERT(map->unused_property_fields() == 0);
3219 ASSERT(map->inobject_properties() == 0);
3220
3221  // Initial size of the backing store to avoid resizing the storage during
3222  // bootstrapping. The size differs between the JS global object and the
3223  // builtins object.
3224 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3225
3226 // Allocate a dictionary object for backing storage.
John Reck59135872010-11-02 12:39:01 -07003227 Object* obj;
3228 { MaybeObject* maybe_obj =
3229 StringDictionary::Allocate(
3230 map->NumberOfDescribedProperties() * 2 + initial_size);
3231 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3232 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003233 StringDictionary* dictionary = StringDictionary::cast(obj);
3234
3235 // The global object might be created from an object template with accessors.
3236 // Fill these accessors into the dictionary.
3237 DescriptorArray* descs = map->instance_descriptors();
3238 for (int i = 0; i < descs->number_of_descriptors(); i++) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003239 PropertyDetails details(descs->GetDetails(i));
Steve Blocka7e24c12009-10-30 11:49:00 +00003240 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3241 PropertyDetails d =
3242 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3243 Object* value = descs->GetCallbacksObject(i);
Steve Block44f0eee2011-05-26 01:26:41 +01003244 { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
John Reck59135872010-11-02 12:39:01 -07003245 if (!maybe_value->ToObject(&value)) return maybe_value;
3246 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003247
John Reck59135872010-11-02 12:39:01 -07003248 Object* result;
3249 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3250 if (!maybe_result->ToObject(&result)) return maybe_result;
3251 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003252 dictionary = StringDictionary::cast(result);
3253 }
3254
3255 // Allocate the global object and initialize it with the backing store.
John Reck59135872010-11-02 12:39:01 -07003256 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3257 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3258 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003259 JSObject* global = JSObject::cast(obj);
3260 InitializeJSObjectFromMap(global, dictionary, map);
3261
3262 // Create a new map for the global object.
John Reck59135872010-11-02 12:39:01 -07003263 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3264 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3265 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003266 Map* new_map = Map::cast(obj);
3267
3268  // Set up the global object as a normalized object.
3269 global->set_map(new_map);
Steve Block44f0eee2011-05-26 01:26:41 +01003270 global->map()->set_instance_descriptors(empty_descriptor_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00003271 global->set_properties(dictionary);
3272
3273 // Make sure result is a global object with properties in dictionary.
3274 ASSERT(global->IsGlobalObject());
3275 ASSERT(!global->HasFastProperties());
3276 return global;
3277}
3278
3279
John Reck59135872010-11-02 12:39:01 -07003280MaybeObject* Heap::CopyJSObject(JSObject* source) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003281 // Never used to copy functions. If functions need to be copied we
3282 // have to be careful to clear the literals array.
3283 ASSERT(!source->IsJSFunction());
3284
3285 // Make the clone.
3286 Map* map = source->map();
3287 int object_size = map->instance_size();
3288 Object* clone;
3289
3290 // If we're forced to always allocate, we use the general allocation
3291 // functions which may leave us with an object in old space.
3292 if (always_allocate()) {
John Reck59135872010-11-02 12:39:01 -07003293 { MaybeObject* maybe_clone =
3294 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3295 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3296 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003297 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003298 CopyBlock(clone_address,
3299 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003300 object_size);
3301 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01003302 RecordWrites(clone_address,
3303 JSObject::kHeaderSize,
3304 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003305 } else {
John Reck59135872010-11-02 12:39:01 -07003306 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3307 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3308 }
Steve Block44f0eee2011-05-26 01:26:41 +01003309 ASSERT(InNewSpace(clone));
Steve Blocka7e24c12009-10-30 11:49:00 +00003310 // Since we know the clone is allocated in new space, we can copy
3311 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003312 CopyBlock(HeapObject::cast(clone)->address(),
3313 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003314 object_size);
3315 }
3316
3317 FixedArray* elements = FixedArray::cast(source->elements());
3318 FixedArray* properties = FixedArray::cast(source->properties());
3319 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01003320 if (elements->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003321 Object* elem;
3322 { MaybeObject* maybe_elem =
3323 (elements->map() == fixed_cow_array_map()) ?
3324 elements : CopyFixedArray(elements);
3325 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3326 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003327 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
3328 }
3329 // Update properties if necessary.
3330 if (properties->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003331 Object* prop;
3332 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3333 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3334 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003335 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3336 }
3337 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00003338#ifdef ENABLE_LOGGING_AND_PROFILING
Steve Block44f0eee2011-05-26 01:26:41 +01003339 isolate_->producer_heap_profile()->RecordJSObjectAllocation(clone);
Steve Block3ce2e202009-11-05 08:53:23 +00003340#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003341 return clone;
3342}
3343
3344
John Reck59135872010-11-02 12:39:01 -07003345MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3346 JSGlobalProxy* object) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003347 ASSERT(constructor->has_initial_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003348 Map* map = constructor->initial_map();
3349
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003350 // Check that the already allocated object has the same size and type as
Steve Blocka7e24c12009-10-30 11:49:00 +00003351 // objects allocated using the constructor.
3352 ASSERT(map->instance_size() == object->map()->instance_size());
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003353 ASSERT(map->instance_type() == object->map()->instance_type());
Steve Blocka7e24c12009-10-30 11:49:00 +00003354
3355 // Allocate the backing storage for the properties.
3356 int prop_size = map->unused_property_fields() - map->inobject_properties();
John Reck59135872010-11-02 12:39:01 -07003357 Object* properties;
3358 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3359 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3360 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003361
3362 // Reset the map for the object.
3363 object->set_map(constructor->initial_map());
3364
3365 // Reinitialize the object from the constructor map.
3366 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3367 return object;
3368}
3369
3370
John Reck59135872010-11-02 12:39:01 -07003371MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
3372 PretenureFlag pretenure) {
3373 Object* result;
3374 { MaybeObject* maybe_result =
3375 AllocateRawAsciiString(string.length(), pretenure);
3376 if (!maybe_result->ToObject(&result)) return maybe_result;
3377 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003378
3379 // Copy the characters into the new object.
3380 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3381 for (int i = 0; i < string.length(); i++) {
3382 string_result->SeqAsciiStringSet(i, string[i]);
3383 }
3384 return result;
3385}
3386
3387
Steve Block9fac8402011-05-12 15:51:54 +01003388MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
3389 PretenureFlag pretenure) {
Leon Clarkeac952652010-07-15 11:15:24 +01003390 // V8 only supports characters in the Basic Multilingual Plane.
3391 const uc32 kMaxSupportedChar = 0xFFFF;
Steve Blocka7e24c12009-10-30 11:49:00 +00003392  // Count the number of characters in the UTF-8 string.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003394 Access<UnicodeCache::Utf8Decoder>
3395 decoder(isolate_->unicode_cache()->utf8_decoder());
Steve Blocka7e24c12009-10-30 11:49:00 +00003396 decoder->Reset(string.start(), string.length());
3397 int chars = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003398 while (decoder->has_more()) {
Steve Block9fac8402011-05-12 15:51:54 +01003399 decoder->GetNext();
Steve Blocka7e24c12009-10-30 11:49:00 +00003400 chars++;
3401 }
3402
John Reck59135872010-11-02 12:39:01 -07003403 Object* result;
3404 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3405 if (!maybe_result->ToObject(&result)) return maybe_result;
3406 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003407
3408 // Convert and copy the characters into the new object.
3409 String* string_result = String::cast(result);
3410 decoder->Reset(string.start(), string.length());
3411 for (int i = 0; i < chars; i++) {
3412 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003413 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003414 string_result->Set(i, r);
3415 }
3416 return result;
3417}
3418
3419
John Reck59135872010-11-02 12:39:01 -07003420MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3421 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003422 // Check if the string is an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003423 MaybeObject* maybe_result;
Steve Block9fac8402011-05-12 15:51:54 +01003424 if (String::IsAscii(string.start(), string.length())) {
John Reck59135872010-11-02 12:39:01 -07003425 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003426 } else { // It's not an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003427 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003428 }
John Reck59135872010-11-02 12:39:01 -07003429 Object* result;
3430 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003431
3432 // Copy the characters into the new object, which may be either ASCII or
3433 // UTF-16.
3434 String* string_result = String::cast(result);
3435 for (int i = 0; i < string.length(); i++) {
3436 string_result->Set(i, string[i]);
3437 }
3438 return result;
3439}
3440
3441
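// Maps a string map to the corresponding symbol map, or returns NULL if the
// string cannot be used as a symbol in place (for example because it lives in
// new space).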
3442Map* Heap::SymbolMapForString(String* string) {
3443 // If the string is in new space it cannot be used as a symbol.
3444 if (InNewSpace(string)) return NULL;
3445
3446 // Find the corresponding symbol map for strings.
3447 Map* map = string->map();
Steve Block44f0eee2011-05-26 01:26:41 +01003448 if (map == ascii_string_map()) {
3449 return ascii_symbol_map();
3450 }
3451 if (map == string_map()) {
3452 return symbol_map();
3453 }
3454 if (map == cons_string_map()) {
3455 return cons_symbol_map();
3456 }
3457 if (map == cons_ascii_string_map()) {
3458 return cons_ascii_symbol_map();
3459 }
3460 if (map == external_string_map()) {
3461 return external_symbol_map();
3462 }
3463 if (map == external_ascii_string_map()) {
3464 return external_ascii_symbol_map();
3465 }
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01003466 if (map == external_string_with_ascii_data_map()) {
3467 return external_symbol_with_ascii_data_map();
3468 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003469
3470 // No match found.
3471 return NULL;
3472}
3473
3474
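// Allocates a sequential symbol from a character stream. The representation
// (ASCII or two-byte) is chosen by scanning the stream, and the result is
// placed directly in old data space or large object space.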
John Reck59135872010-11-02 12:39:01 -07003475MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3476 int chars,
3477 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00003478 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003479 // Ensure the chars matches the number of characters in the buffer.
3480 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3481 // Determine whether the string is ascii.
3482 bool is_ascii = true;
Leon Clarkee46be812010-01-19 14:06:41 +00003483 while (buffer->has_more()) {
3484 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3485 is_ascii = false;
3486 break;
3487 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003488 }
3489 buffer->Rewind();
3490
3491 // Compute map and object size.
3492 int size;
3493 Map* map;
3494
3495 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00003496 if (chars > SeqAsciiString::kMaxLength) {
3497 return Failure::OutOfMemoryException();
3498 }
Steve Blockd0582a62009-12-15 09:54:21 +00003499 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003500 size = SeqAsciiString::SizeFor(chars);
3501 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00003502 if (chars > SeqTwoByteString::kMaxLength) {
3503 return Failure::OutOfMemoryException();
3504 }
Steve Blockd0582a62009-12-15 09:54:21 +00003505 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003506 size = SeqTwoByteString::SizeFor(chars);
3507 }
3508
3509 // Allocate string.
John Reck59135872010-11-02 12:39:01 -07003510 Object* result;
3511 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3512 ? lo_space_->AllocateRaw(size)
3513 : old_data_space_->AllocateRaw(size);
3514 if (!maybe_result->ToObject(&result)) return maybe_result;
3515 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003516
3517 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00003518 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00003519 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00003520 answer->set_length(chars);
3521 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00003522
3523 ASSERT_EQ(size, answer->Size());
3524
3525 // Fill in the characters.
3526 for (int i = 0; i < chars; i++) {
3527 answer->Set(i, buffer->GetNext());
3528 }
3529 return answer;
3530}
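
// --- Illustrative note, not part of the original V8 source ---
// AllocateInternalSymbol() makes two passes over the character stream:
// the first decides between the ASCII and two-byte representations (a
// single code unit above unibrow::Utf8::kMaxOneByteChar forces the
// two-byte form), the stream is rewound, and the second pass writes
// the characters. The object size differs accordingly, roughly:
//
//   SeqAsciiString::SizeFor(n)   ~ header + n bytes     (aligned up)
//   SeqTwoByteString::SizeFor(n) ~ header + 2 * n bytes (aligned up)
//
// so a 100-character ASCII symbol takes roughly half the space of the
// same symbol stored as two-byte data.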
3531
3532
John Reck59135872010-11-02 12:39:01 -07003533MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003534 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3535 return Failure::OutOfMemoryException();
3536 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003537
3538 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003539 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003540
Leon Clarkee46be812010-01-19 14:06:41 +00003541 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3542 AllocationSpace retry_space = OLD_DATA_SPACE;
3543
Steve Blocka7e24c12009-10-30 11:49:00 +00003544 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00003545 if (size > kMaxObjectSizeInNewSpace) {
3546 // Allocate in large object space; the retry space will be ignored.
3547 space = LO_SPACE;
3548 } else if (size > MaxObjectSizeInPagedSpace()) {
3549 // Allocate in new space, retry in large object space.
3550 retry_space = LO_SPACE;
3551 }
3552 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3553 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003554 }
John Reck59135872010-11-02 12:39:01 -07003555 Object* result;
3556 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3557 if (!maybe_result->ToObject(&result)) return maybe_result;
3558 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003559
Steve Blocka7e24c12009-10-30 11:49:00 +00003560 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003561 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003562 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003563 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003564 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3565 return result;
3566}
3567
3568
John Reck59135872010-11-02 12:39:01 -07003569MaybeObject* Heap::AllocateRawTwoByteString(int length,
3570 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003571 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3572 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00003573 }
Leon Clarkee46be812010-01-19 14:06:41 +00003574 int size = SeqTwoByteString::SizeFor(length);
3575 ASSERT(size <= SeqTwoByteString::kMaxSize);
3576 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3577 AllocationSpace retry_space = OLD_DATA_SPACE;
3578
3579 if (space == NEW_SPACE) {
3580 if (size > kMaxObjectSizeInNewSpace) {
3581 // Allocate in large object space; the retry space will be ignored.
3582 space = LO_SPACE;
3583 } else if (size > MaxObjectSizeInPagedSpace()) {
3584 // Allocate in new space, retry in large object space.
3585 retry_space = LO_SPACE;
3586 }
3587 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3588 space = LO_SPACE;
3589 }
John Reck59135872010-11-02 12:39:01 -07003590 Object* result;
3591 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3592 if (!maybe_result->ToObject(&result)) return maybe_result;
3593 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003594
Steve Blocka7e24c12009-10-30 11:49:00 +00003595 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003596 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003597 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003598 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003599 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3600 return result;
3601}
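
// --- Illustrative note, not part of the original V8 source ---
// Both raw string allocators above pick their target space from the
// pretenure flag and the object size. A condensed sketch of the
// decision they share:
//
//   space       = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
//   retry_space = OLD_DATA_SPACE;
//   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace)
//     space = LO_SPACE;            // too big even for a semispace
//   else if (space == NEW_SPACE && size > MaxObjectSizeInPagedSpace())
//     retry_space = LO_SPACE;      // retry oversized objects in LO space
//   else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace())
//     space = LO_SPACE;            // too big for a paged space
//
// AllocateRaw() then attempts 'space' first; 'retry_space' is only
// used when that allocation has to be retried.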
3602
3603
John Reck59135872010-11-02 12:39:01 -07003604MaybeObject* Heap::AllocateEmptyFixedArray() {
Steve Blocka7e24c12009-10-30 11:49:00 +00003605 int size = FixedArray::SizeFor(0);
John Reck59135872010-11-02 12:39:01 -07003606 Object* result;
3607 { MaybeObject* maybe_result =
3608 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3609 if (!maybe_result->ToObject(&result)) return maybe_result;
3610 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003611 // Initialize the object.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003612 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3613 reinterpret_cast<FixedArray*>(result)->set_length(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003614 return result;
3615}
3616
3617
John Reck59135872010-11-02 12:39:01 -07003618MaybeObject* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00003619 if (length < 0 || length > FixedArray::kMaxLength) {
3620 return Failure::OutOfMemoryException();
3621 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003622 ASSERT(length > 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003623 // Use the general function if we're forced to always allocate.
3624 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3625 // Allocate the raw data for a fixed array.
3626 int size = FixedArray::SizeFor(length);
3627 return size <= kMaxObjectSizeInNewSpace
3628 ? new_space_.AllocateRaw(size)
3629 : lo_space_->AllocateRawFixedArray(size);
3630}
3631
3632
John Reck59135872010-11-02 12:39:01 -07003633MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003634 int len = src->length();
John Reck59135872010-11-02 12:39:01 -07003635 Object* obj;
3636 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3637 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3638 }
Steve Block44f0eee2011-05-26 01:26:41 +01003639 if (InNewSpace(obj)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003640 HeapObject* dst = HeapObject::cast(obj);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003641 dst->set_map(map);
3642 CopyBlock(dst->address() + kPointerSize,
3643 src->address() + kPointerSize,
3644 FixedArray::SizeFor(len) - kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003645 return obj;
3646 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003647 HeapObject::cast(obj)->set_map(map);
Steve Blocka7e24c12009-10-30 11:49:00 +00003648 FixedArray* result = FixedArray::cast(obj);
3649 result->set_length(len);
Leon Clarke4515c472010-02-03 11:58:03 +00003650
Steve Blocka7e24c12009-10-30 11:49:00 +00003651 // Copy the content
Leon Clarke4515c472010-02-03 11:58:03 +00003652 AssertNoAllocation no_gc;
3653 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00003654 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3655 return result;
3656}
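
// --- Illustrative note, not part of the original V8 source ---
// CopyFixedArrayWithMap() has two paths: if the fresh copy landed in
// new space, the elements are block-copied with no write barrier,
// because pointers stored in new-space objects never need recording.
// If the copy landed in old space (e.g. under always_allocate()), each
// element is stored via set(i, ..., mode) with the mode returned by
// GetWriteBarrierMode(), so that old-to-new pointers are recorded.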
3657
3658
John Reck59135872010-11-02 12:39:01 -07003659MaybeObject* Heap::AllocateFixedArray(int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003660 ASSERT(length >= 0);
3661 if (length == 0) return empty_fixed_array();
John Reck59135872010-11-02 12:39:01 -07003662 Object* result;
3663 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
3664 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003665 }
John Reck59135872010-11-02 12:39:01 -07003666 // Initialize header.
3667 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3668 array->set_map(fixed_array_map());
3669 array->set_length(length);
3670 // Initialize body.
Steve Block44f0eee2011-05-26 01:26:41 +01003671 ASSERT(!InNewSpace(undefined_value()));
John Reck59135872010-11-02 12:39:01 -07003672 MemsetPointer(array->data_start(), undefined_value(), length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003673 return result;
3674}
3675
3676
John Reck59135872010-11-02 12:39:01 -07003677MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003678 if (length < 0 || length > FixedArray::kMaxLength) {
3679 return Failure::OutOfMemoryException();
3680 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003681
Leon Clarkee46be812010-01-19 14:06:41 +00003682 AllocationSpace space =
3683 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003684 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003685 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3686 // Too big for new space.
3687 space = LO_SPACE;
3688 } else if (space == OLD_POINTER_SPACE &&
3689 size > MaxObjectSizeInPagedSpace()) {
3690 // Too big for old pointer space.
3691 space = LO_SPACE;
3692 }
3693
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003694 AllocationSpace retry_space =
3695 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3696
3697 return AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003698}
3699
3700
John Reck59135872010-11-02 12:39:01 -07003701MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
Steve Block44f0eee2011-05-26 01:26:41 +01003702 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07003703 int length,
3704 PretenureFlag pretenure,
3705 Object* filler) {
Steve Block6ded16b2010-05-10 14:33:55 +01003706 ASSERT(length >= 0);
Steve Block44f0eee2011-05-26 01:26:41 +01003707 ASSERT(heap->empty_fixed_array()->IsFixedArray());
3708 if (length == 0) return heap->empty_fixed_array();
Steve Block6ded16b2010-05-10 14:33:55 +01003709
Steve Block44f0eee2011-05-26 01:26:41 +01003710 ASSERT(!heap->InNewSpace(filler));
John Reck59135872010-11-02 12:39:01 -07003711 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003712 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
John Reck59135872010-11-02 12:39:01 -07003713 if (!maybe_result->ToObject(&result)) return maybe_result;
3714 }
Steve Block6ded16b2010-05-10 14:33:55 +01003715
Steve Block44f0eee2011-05-26 01:26:41 +01003716 HeapObject::cast(result)->set_map(heap->fixed_array_map());
Steve Block6ded16b2010-05-10 14:33:55 +01003717 FixedArray* array = FixedArray::cast(result);
3718 array->set_length(length);
3719 MemsetPointer(array->data_start(), filler, length);
3720 return array;
3721}
3722
3723
John Reck59135872010-11-02 12:39:01 -07003724MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
Steve Block44f0eee2011-05-26 01:26:41 +01003725 return AllocateFixedArrayWithFiller(this,
3726 length,
3727 pretenure,
3728 undefined_value());
Steve Block6ded16b2010-05-10 14:33:55 +01003729}
3730
3731
John Reck59135872010-11-02 12:39:01 -07003732MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
3733 PretenureFlag pretenure) {
Steve Block44f0eee2011-05-26 01:26:41 +01003734 return AllocateFixedArrayWithFiller(this,
3735 length,
3736 pretenure,
3737 the_hole_value());
Steve Block6ded16b2010-05-10 14:33:55 +01003738}
3739
3740
John Reck59135872010-11-02 12:39:01 -07003741MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
Steve Block6ded16b2010-05-10 14:33:55 +01003742 if (length == 0) return empty_fixed_array();
3743
John Reck59135872010-11-02 12:39:01 -07003744 Object* obj;
3745 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
3746 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3747 }
Steve Block6ded16b2010-05-10 14:33:55 +01003748
3749 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3750 FixedArray::cast(obj)->set_length(length);
3751 return obj;
3752}
3753
3754
John Reck59135872010-11-02 12:39:01 -07003755MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3756 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003757 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
John Reck59135872010-11-02 12:39:01 -07003758 if (!maybe_result->ToObject(&result)) return maybe_result;
3759 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003760 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003761 ASSERT(result->IsHashTable());
3762 return result;
3763}
3764
3765
John Reck59135872010-11-02 12:39:01 -07003766MaybeObject* Heap::AllocateGlobalContext() {
3767 Object* result;
3768 { MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01003769 AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003770 if (!maybe_result->ToObject(&result)) return maybe_result;
3771 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003772 Context* context = reinterpret_cast<Context*>(result);
3773 context->set_map(global_context_map());
3774 ASSERT(context->IsGlobalContext());
3775 ASSERT(result->IsContext());
3776 return result;
3777}
3778
3779
John Reck59135872010-11-02 12:39:01 -07003780MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003781 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003782 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003783 { MaybeObject* maybe_result = AllocateFixedArray(length);
John Reck59135872010-11-02 12:39:01 -07003784 if (!maybe_result->ToObject(&result)) return maybe_result;
3785 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003786 Context* context = reinterpret_cast<Context*>(result);
3787 context->set_map(context_map());
3788 context->set_closure(function);
3789 context->set_fcontext(context);
3790 context->set_previous(NULL);
3791 context->set_extension(NULL);
3792 context->set_global(function->context()->global());
3793 ASSERT(!context->IsGlobalContext());
3794 ASSERT(context->is_function_context());
3795 ASSERT(result->IsContext());
3796 return result;
3797}
3798
3799
John Reck59135872010-11-02 12:39:01 -07003800MaybeObject* Heap::AllocateWithContext(Context* previous,
3801 JSObject* extension,
3802 bool is_catch_context) {
3803 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003804 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003805 if (!maybe_result->ToObject(&result)) return maybe_result;
3806 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003807 Context* context = reinterpret_cast<Context*>(result);
Steve Block44f0eee2011-05-26 01:26:41 +01003808 context->set_map(is_catch_context ? catch_context_map() :
3809 context_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003810 context->set_closure(previous->closure());
3811 context->set_fcontext(previous->fcontext());
3812 context->set_previous(previous);
3813 context->set_extension(extension);
3814 context->set_global(previous->global());
3815 ASSERT(!context->IsGlobalContext());
3816 ASSERT(!context->is_function_context());
3817 ASSERT(result->IsContext());
3818 return result;
3819}
3820
3821
John Reck59135872010-11-02 12:39:01 -07003822MaybeObject* Heap::AllocateStruct(InstanceType type) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003823 Map* map;
3824 switch (type) {
Steve Block44f0eee2011-05-26 01:26:41 +01003825#define MAKE_CASE(NAME, Name, name) \
3826 case NAME##_TYPE: map = name##_map(); break;
Steve Blocka7e24c12009-10-30 11:49:00 +00003827STRUCT_LIST(MAKE_CASE)
3828#undef MAKE_CASE
3829 default:
3830 UNREACHABLE();
3831 return Failure::InternalError();
3832 }
3833 int size = map->instance_size();
3834 AllocationSpace space =
3835 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
John Reck59135872010-11-02 12:39:01 -07003836 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003837 { MaybeObject* maybe_result = Allocate(map, space);
John Reck59135872010-11-02 12:39:01 -07003838 if (!maybe_result->ToObject(&result)) return maybe_result;
3839 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003840 Struct::cast(result)->InitializeBody(size);
3841 return result;
3842}
3843
3844
3845bool Heap::IdleNotification() {
3846 static const int kIdlesBeforeScavenge = 4;
3847 static const int kIdlesBeforeMarkSweep = 7;
3848 static const int kIdlesBeforeMarkCompact = 8;
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003849 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
Ben Murdoche0cee9b2011-05-25 10:26:03 +01003850 static const unsigned int kGCsBetweenCleanup = 4;
Steve Block44f0eee2011-05-26 01:26:41 +01003851
3852 if (!last_idle_notification_gc_count_init_) {
3853 last_idle_notification_gc_count_ = gc_count_;
3854 last_idle_notification_gc_count_init_ = true;
3855 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003856
Steve Block6ded16b2010-05-10 14:33:55 +01003857 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003858 bool finished = false;
3859
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003860 // Reset the number of idle notifications received when a number of
3861 // GCs have taken place. This allows another round of cleanup based
3862 // on idle notifications if enough work has been carried out to
3863 // provoke a number of garbage collections.
Steve Block44f0eee2011-05-26 01:26:41 +01003864 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
3865 number_idle_notifications_ =
3866 Min(number_idle_notifications_ + 1, kMaxIdleCount);
Steve Blocka7e24c12009-10-30 11:49:00 +00003867 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01003868 number_idle_notifications_ = 0;
3869 last_idle_notification_gc_count_ = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003870 }
3871
Steve Block44f0eee2011-05-26 01:26:41 +01003872 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003873 if (contexts_disposed_ > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01003874 HistogramTimerScope scope(isolate_->counters()->gc_context());
Steve Block6ded16b2010-05-10 14:33:55 +01003875 CollectAllGarbage(false);
3876 } else {
Ben Murdochf87a2032010-10-22 12:50:53 +01003877 CollectGarbage(NEW_SPACE);
Steve Block6ded16b2010-05-10 14:33:55 +01003878 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003879 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003880 last_idle_notification_gc_count_ = gc_count_;
3881 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003882 // Before doing the mark-sweep collections we clear the
3883 // compilation cache to avoid hanging on to source code and
3884 // generated code for cached functions.
Steve Block44f0eee2011-05-26 01:26:41 +01003885 isolate_->compilation_cache()->Clear();
Steve Blockd0582a62009-12-15 09:54:21 +00003886
Steve Blocka7e24c12009-10-30 11:49:00 +00003887 CollectAllGarbage(false);
3888 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003889 last_idle_notification_gc_count_ = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003890
Steve Block44f0eee2011-05-26 01:26:41 +01003891 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003892 CollectAllGarbage(true);
3893 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003894 last_idle_notification_gc_count_ = gc_count_;
3895 number_idle_notifications_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003896 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003897 } else if (contexts_disposed_ > 0) {
3898 if (FLAG_expose_gc) {
3899 contexts_disposed_ = 0;
3900 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01003901 HistogramTimerScope scope(isolate_->counters()->gc_context());
Steve Block6ded16b2010-05-10 14:33:55 +01003902 CollectAllGarbage(false);
Steve Block44f0eee2011-05-26 01:26:41 +01003903 last_idle_notification_gc_count_ = gc_count_;
Steve Block6ded16b2010-05-10 14:33:55 +01003904 }
3905 // If this is the first idle notification, we reset the
3906 // notification count to avoid letting idle notifications for
3907 // context disposal garbage collections start a potentially too
3908 // aggressive idle GC cycle.
Steve Block44f0eee2011-05-26 01:26:41 +01003909 if (number_idle_notifications_ <= 1) {
3910 number_idle_notifications_ = 0;
Steve Block6ded16b2010-05-10 14:33:55 +01003911 uncommit = false;
3912 }
Steve Block44f0eee2011-05-26 01:26:41 +01003913 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003914 // If we have received more than kIdlesBeforeMarkCompact idle
3915 // notifications we do not perform any cleanup because we don't
3916 // expect to gain much by doing so.
3917 finished = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003918 }
3919
Steve Block6ded16b2010-05-10 14:33:55 +01003920 // Make sure that we have no pending context disposals and
3921 // conditionally uncommit from space.
3922 ASSERT(contexts_disposed_ == 0);
Steve Block44f0eee2011-05-26 01:26:41 +01003923 if (uncommit) UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003924 return finished;
3925}
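
// --- Illustrative note, not part of the original V8 source ---
// Rough shape of the idle heuristic above, assuming no pending context
// disposals and fewer than kGCsBetweenCleanup GCs since the previous
// notification (so the counter keeps growing):
//
//   notifications 1-3 : counted only, no collection
//   notification  4   : scavenge new space, then shrink it
//   notifications 5-6 : counted only
//   notification  7   : clear the compilation cache + full mark-sweep
//   notification  8   : full mark-compact, counter reset, returns true
//   notifications > 8 : return true without doing further work
//
// Returning true tells the embedder that no further idle work is
// expected for now.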
3926
3927
3928#ifdef DEBUG
3929
3930void Heap::Print() {
3931 if (!HasBeenSetup()) return;
Steve Block44f0eee2011-05-26 01:26:41 +01003932 isolate()->PrintStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00003933 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003934 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3935 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003936}
3937
3938
3939void Heap::ReportCodeStatistics(const char* title) {
3940 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3941 PagedSpace::ResetCodeStatistics();
3942 // We do not look for code in new space, map space, or old space. If code
3943 // somehow ends up in those spaces, we would miss it here.
3944 code_space_->CollectCodeStatistics();
3945 lo_space_->CollectCodeStatistics();
3946 PagedSpace::ReportCodeStatistics();
3947}
3948
3949
3950// This function expects that NewSpace's allocated objects histogram is
3951// populated (via a call to CollectStatistics or else as a side effect of a
3952// just-completed scavenge collection).
3953void Heap::ReportHeapStatistics(const char* title) {
3954 USE(title);
3955 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3956 title, gc_count_);
3957 PrintF("mark-compact GC : %d\n", mc_count_);
Ben Murdochf87a2032010-10-22 12:50:53 +01003958 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
3959 old_gen_promotion_limit_);
3960 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
3961 old_gen_allocation_limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003962
3963 PrintF("\n");
3964 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
Steve Block44f0eee2011-05-26 01:26:41 +01003965 isolate_->global_handles()->PrintStats();
Steve Blocka7e24c12009-10-30 11:49:00 +00003966 PrintF("\n");
3967
3968 PrintF("Heap statistics : ");
Steve Block44f0eee2011-05-26 01:26:41 +01003969 isolate_->memory_allocator()->ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00003970 PrintF("To space : ");
3971 new_space_.ReportStatistics();
3972 PrintF("Old pointer space : ");
3973 old_pointer_space_->ReportStatistics();
3974 PrintF("Old data space : ");
3975 old_data_space_->ReportStatistics();
3976 PrintF("Code space : ");
3977 code_space_->ReportStatistics();
3978 PrintF("Map space : ");
3979 map_space_->ReportStatistics();
3980 PrintF("Cell space : ");
3981 cell_space_->ReportStatistics();
3982 PrintF("Large object space : ");
3983 lo_space_->ReportStatistics();
3984 PrintF(">>>>>> ========================================= >>>>>>\n");
3985}
3986
3987#endif // DEBUG
3988
3989bool Heap::Contains(HeapObject* value) {
3990 return Contains(value->address());
3991}
3992
3993
3994bool Heap::Contains(Address addr) {
3995 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3996 return HasBeenSetup() &&
3997 (new_space_.ToSpaceContains(addr) ||
3998 old_pointer_space_->Contains(addr) ||
3999 old_data_space_->Contains(addr) ||
4000 code_space_->Contains(addr) ||
4001 map_space_->Contains(addr) ||
4002 cell_space_->Contains(addr) ||
4003 lo_space_->SlowContains(addr));
4004}
4005
4006
4007bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4008 return InSpace(value->address(), space);
4009}
4010
4011
4012bool Heap::InSpace(Address addr, AllocationSpace space) {
4013 if (OS::IsOutsideAllocatedSpace(addr)) return false;
4014 if (!HasBeenSetup()) return false;
4015
4016 switch (space) {
4017 case NEW_SPACE:
4018 return new_space_.ToSpaceContains(addr);
4019 case OLD_POINTER_SPACE:
4020 return old_pointer_space_->Contains(addr);
4021 case OLD_DATA_SPACE:
4022 return old_data_space_->Contains(addr);
4023 case CODE_SPACE:
4024 return code_space_->Contains(addr);
4025 case MAP_SPACE:
4026 return map_space_->Contains(addr);
4027 case CELL_SPACE:
4028 return cell_space_->Contains(addr);
4029 case LO_SPACE:
4030 return lo_space_->SlowContains(addr);
4031 }
4032
4033 return false;
4034}
4035
4036
4037#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004038static void DummyScavengePointer(HeapObject** p) {
4039}
4040
4041
4042static void VerifyPointersUnderWatermark(
4043 PagedSpace* space,
4044 DirtyRegionCallback visit_dirty_region) {
4045 PageIterator it(space, PageIterator::PAGES_IN_USE);
4046
4047 while (it.has_next()) {
4048 Page* page = it.next();
4049 Address start = page->ObjectAreaStart();
4050 Address end = page->AllocationWatermark();
4051
Steve Block44f0eee2011-05-26 01:26:41 +01004052 HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004053 start,
4054 end,
4055 visit_dirty_region,
4056 &DummyScavengePointer);
4057 }
4058}
4059
4060
4061static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
4062 LargeObjectIterator it(space);
4063 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
4064 if (object->IsFixedArray()) {
4065 Address slot_address = object->address();
4066 Address end = object->address() + object->Size();
4067
4068 while (slot_address < end) {
4069 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
4070 // When we are not in GC, the Heap::InNewSpace() predicate
4071 // checks that pointers which satisfy the predicate point into
4072 // the active semispace.
Steve Block44f0eee2011-05-26 01:26:41 +01004073 HEAP->InNewSpace(*slot);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004074 slot_address += kPointerSize;
4075 }
4076 }
4077 }
4078}
4079
4080
Steve Blocka7e24c12009-10-30 11:49:00 +00004081void Heap::Verify() {
4082 ASSERT(HasBeenSetup());
4083
4084 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00004085 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00004086
4087 new_space_.Verify();
4088
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004089 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
4090 old_pointer_space_->Verify(&dirty_regions_visitor);
4091 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004092
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004093 VerifyPointersUnderWatermark(old_pointer_space_,
4094 &IteratePointersInDirtyRegion);
4095 VerifyPointersUnderWatermark(map_space_,
4096 &IteratePointersInDirtyMapsRegion);
4097 VerifyPointersUnderWatermark(lo_space_);
4098
4099 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
4100 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
4101
4102 VerifyPointersVisitor no_dirty_regions_visitor;
4103 old_data_space_->Verify(&no_dirty_regions_visitor);
4104 code_space_->Verify(&no_dirty_regions_visitor);
4105 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004106
4107 lo_space_->Verify();
4108}
4109#endif // DEBUG
4110
4111
John Reck59135872010-11-02 12:39:01 -07004112MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004113 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004114 Object* new_table;
4115 { MaybeObject* maybe_new_table =
4116 symbol_table()->LookupSymbol(string, &symbol);
4117 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4118 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004119 // Can't use set_symbol_table because SymbolTable::cast knows that
4120 // SymbolTable is a singleton and checks for identity.
4121 roots_[kSymbolTableRootIndex] = new_table;
4122 ASSERT(symbol != NULL);
4123 return symbol;
4124}
4125
4126
Steve Block9fac8402011-05-12 15:51:54 +01004127MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
4128 Object* symbol = NULL;
4129 Object* new_table;
4130 { MaybeObject* maybe_new_table =
4131 symbol_table()->LookupAsciiSymbol(string, &symbol);
4132 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4133 }
4134 // Can't use set_symbol_table because SymbolTable::cast knows that
4135 // SymbolTable is a singleton and checks for identity.
4136 roots_[kSymbolTableRootIndex] = new_table;
4137 ASSERT(symbol != NULL);
4138 return symbol;
4139}
4140
4141
4142MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
4143 Object* symbol = NULL;
4144 Object* new_table;
4145 { MaybeObject* maybe_new_table =
4146 symbol_table()->LookupTwoByteSymbol(string, &symbol);
4147 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4148 }
4149 // Can't use set_symbol_table because SymbolTable::cast knows that
4150 // SymbolTable is a singleton and checks for identity.
4151 roots_[kSymbolTableRootIndex] = new_table;
4152 ASSERT(symbol != NULL);
4153 return symbol;
4154}
4155
4156
John Reck59135872010-11-02 12:39:01 -07004157MaybeObject* Heap::LookupSymbol(String* string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004158 if (string->IsSymbol()) return string;
4159 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004160 Object* new_table;
4161 { MaybeObject* maybe_new_table =
4162 symbol_table()->LookupString(string, &symbol);
4163 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4164 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004165 // Can't use set_symbol_table because SymbolTable::cast knows that
4166 // SymbolTable is a singleton and checks for identity.
4167 roots_[kSymbolTableRootIndex] = new_table;
4168 ASSERT(symbol != NULL);
4169 return symbol;
4170}
4171
4172
4173bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4174 if (string->IsSymbol()) {
4175 *symbol = string;
4176 return true;
4177 }
4178 return symbol_table()->LookupSymbolIfExists(string, symbol);
4179}
4180
4181
4182#ifdef DEBUG
4183void Heap::ZapFromSpace() {
Steve Block1e0659c2011-05-24 12:43:12 +01004184 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
Steve Blocka7e24c12009-10-30 11:49:00 +00004185 for (Address a = new_space_.FromSpaceLow();
4186 a < new_space_.FromSpaceHigh();
4187 a += kPointerSize) {
4188 Memory::Address_at(a) = kFromSpaceZapValue;
4189 }
4190}
4191#endif // DEBUG
4192
4193
Steve Block44f0eee2011-05-26 01:26:41 +01004194bool Heap::IteratePointersInDirtyRegion(Heap* heap,
4195 Address start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004196 Address end,
4197 ObjectSlotCallback copy_object_func) {
4198 Address slot_address = start;
4199 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004200
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004201 while (slot_address < end) {
4202 Object** slot = reinterpret_cast<Object**>(slot_address);
Steve Block44f0eee2011-05-26 01:26:41 +01004203 if (heap->InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004204 ASSERT((*slot)->IsHeapObject());
4205 copy_object_func(reinterpret_cast<HeapObject**>(slot));
Steve Block44f0eee2011-05-26 01:26:41 +01004206 if (heap->InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004207 ASSERT((*slot)->IsHeapObject());
4208 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004209 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004210 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004211 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00004212 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004213 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00004214}
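
// --- Illustrative note, not part of the original V8 source ---
// The return value above is what lets a region's dirty bit be cleared:
// copy_object_func may promote the pointee out of new space, in which
// case the second InNewSpace() check fails for that slot. Only slots
// that still point into new space after the callback ran make the
// function return true, i.e. keep the region dirty.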
4215
4216
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004217// Compute start address of the first map following given addr.
4218static inline Address MapStartAlign(Address addr) {
4219 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4220 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4221}
Steve Blocka7e24c12009-10-30 11:49:00 +00004222
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004223
4224// Compute the end address of the last map preceding the given addr.
4225static inline Address MapEndAlign(Address addr) {
4226 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4227 return page + ((addr - page) / Map::kSize * Map::kSize);
4228}
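
// --- Illustrative note, not part of the original V8 source ---
// The two helpers above round relative to the page's object area
// start, not to absolute addresses. A hedged worked example, using a
// made-up Map::kSize of 88 bytes and an addr 100 bytes into the object
// area:
//
//   MapStartAlign: page + ((100 + 87) / 88) * 88  ==  page + 176
//   MapEndAlign:   page + ( 100       / 88) * 88  ==  page +  88
//
// i.e. the start of the first map at or after addr, and the end of the
// last map ending at or before addr, respectively.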
4229
4230
4231static bool IteratePointersInDirtyMaps(Address start,
4232 Address end,
4233 ObjectSlotCallback copy_object_func) {
4234 ASSERT(MapStartAlign(start) == start);
4235 ASSERT(MapEndAlign(end) == end);
4236
4237 Address map_address = start;
4238 bool pointers_to_new_space_found = false;
4239
Steve Block44f0eee2011-05-26 01:26:41 +01004240 Heap* heap = HEAP;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004241 while (map_address < end) {
Steve Block44f0eee2011-05-26 01:26:41 +01004242 ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004243 ASSERT(Memory::Object_at(map_address)->IsMap());
4244
4245 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4246 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4247
Steve Block44f0eee2011-05-26 01:26:41 +01004248 if (Heap::IteratePointersInDirtyRegion(heap,
4249 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004250 pointer_fields_end,
4251 copy_object_func)) {
4252 pointers_to_new_space_found = true;
4253 }
4254
4255 map_address += Map::kSize;
4256 }
4257
4258 return pointers_to_new_space_found;
4259}
4260
4261
4262bool Heap::IteratePointersInDirtyMapsRegion(
Steve Block44f0eee2011-05-26 01:26:41 +01004263 Heap* heap,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004264 Address start,
4265 Address end,
4266 ObjectSlotCallback copy_object_func) {
4267 Address map_aligned_start = MapStartAlign(start);
4268 Address map_aligned_end = MapEndAlign(end);
4269
4270 bool contains_pointers_to_new_space = false;
4271
4272 if (map_aligned_start != start) {
4273 Address prev_map = map_aligned_start - Map::kSize;
4274 ASSERT(Memory::Object_at(prev_map)->IsMap());
4275
4276 Address pointer_fields_start =
4277 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4278
4279 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004280 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004281
4282 contains_pointers_to_new_space =
Steve Block44f0eee2011-05-26 01:26:41 +01004283 IteratePointersInDirtyRegion(heap,
4284 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004285 pointer_fields_end,
4286 copy_object_func)
4287 || contains_pointers_to_new_space;
4288 }
4289
4290 contains_pointers_to_new_space =
4291 IteratePointersInDirtyMaps(map_aligned_start,
4292 map_aligned_end,
4293 copy_object_func)
4294 || contains_pointers_to_new_space;
4295
4296 if (map_aligned_end != end) {
4297 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4298
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004299 Address pointer_fields_start =
4300 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004301
4302 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004303 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004304
4305 contains_pointers_to_new_space =
Steve Block44f0eee2011-05-26 01:26:41 +01004306 IteratePointersInDirtyRegion(heap,
4307 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004308 pointer_fields_end,
4309 copy_object_func)
4310 || contains_pointers_to_new_space;
4311 }
4312
4313 return contains_pointers_to_new_space;
4314}
4315
4316
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004317void Heap::IterateAndMarkPointersToFromSpace(Address start,
4318 Address end,
4319 ObjectSlotCallback callback) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004320 Address slot_address = start;
4321 Page* page = Page::FromAddress(start);
4322
4323 uint32_t marks = page->GetRegionMarks();
4324
4325 while (slot_address < end) {
4326 Object** slot = reinterpret_cast<Object**>(slot_address);
Steve Block44f0eee2011-05-26 01:26:41 +01004327 if (InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004328 ASSERT((*slot)->IsHeapObject());
4329 callback(reinterpret_cast<HeapObject**>(slot));
Steve Block44f0eee2011-05-26 01:26:41 +01004330 if (InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004331 ASSERT((*slot)->IsHeapObject());
4332 marks |= page->GetRegionMaskForAddress(slot_address);
4333 }
4334 }
4335 slot_address += kPointerSize;
4336 }
4337
4338 page->SetRegionMarks(marks);
4339}
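
// --- Illustrative note, not part of the original V8 source ---
// Unlike IteratePointersInDirtyRegion(), this variant only visits
// slots that point into from-space (objects being evacuated by the
// current scavenge) and re-dirties the page's region bit for any slot
// that still points into new space afterwards, i.e. for objects that
// survived by being copied within new space rather than promoted.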
4340
4341
4342uint32_t Heap::IterateDirtyRegions(
4343 uint32_t marks,
4344 Address area_start,
4345 Address area_end,
4346 DirtyRegionCallback visit_dirty_region,
4347 ObjectSlotCallback copy_object_func) {
4348 uint32_t newmarks = 0;
4349 uint32_t mask = 1;
4350
4351 if (area_start >= area_end) {
4352 return newmarks;
4353 }
4354
4355 Address region_start = area_start;
4356
4357 // area_start does not necessarily coincide with start of the first region.
4358 // Thus to calculate the beginning of the next region we have to align
4359 // area_start by Page::kRegionSize.
4360 Address second_region =
4361 reinterpret_cast<Address>(
4362 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4363 ~Page::kRegionAlignmentMask);
4364
4365 // Next region might be beyond area_end.
4366 Address region_end = Min(second_region, area_end);
4367
4368 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004369 if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004370 newmarks |= mask;
4371 }
4372 }
4373 mask <<= 1;
4374
4375 // Iterate subsequent regions which fully lay inside [area_start, area_end[.
4376 region_start = region_end;
4377 region_end = region_start + Page::kRegionSize;
4378
4379 while (region_end <= area_end) {
4380 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004381 if (visit_dirty_region(this,
4382 region_start,
4383 region_end,
4384 copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004385 newmarks |= mask;
4386 }
4387 }
4388
4389 region_start = region_end;
4390 region_end = region_start + Page::kRegionSize;
4391
4392 mask <<= 1;
4393 }
4394
4395 if (region_start != area_end) {
4396 // A small piece of the area is left un-iterated because area_end does
4397 // not coincide with a region end. Check whether the region covering the
4398 // last part of the area is dirty.
4399 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004400 if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004401 newmarks |= mask;
4402 }
4403 }
4404 }
4405
4406 return newmarks;
4407}
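
// --- Illustrative note, not part of the original V8 source ---
// Each page is divided into regions of Page::kRegionSize bytes and
// 'marks' carries one dirty bit per region. The loop above walks the
// area region by region, visits only regions whose bit is set, and
// assembles 'newmarks' from the regions that still contain old-to-new
// pointers after visiting. Sketch: for an area spanning three regions
// with marks == 0b101, regions 0 and 2 are visited, region 1 is
// skipped, and a bit stays set in the result only where the visit
// reported remaining new-space pointers.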
4408
4409
4410
4411void Heap::IterateDirtyRegions(
4412 PagedSpace* space,
4413 DirtyRegionCallback visit_dirty_region,
4414 ObjectSlotCallback copy_object_func,
4415 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004416
4417 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004418
Steve Blocka7e24c12009-10-30 11:49:00 +00004419 while (it.has_next()) {
4420 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004421 uint32_t marks = page->GetRegionMarks();
4422
4423 if (marks != Page::kAllRegionsCleanMarks) {
4424 Address start = page->ObjectAreaStart();
4425
4426 // Do not try to visit pointers beyond page allocation watermark.
4427 // Page can contain garbage pointers there.
4428 Address end;
4429
4430 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4431 page->IsWatermarkValid()) {
4432 end = page->AllocationWatermark();
4433 } else {
4434 end = page->CachedAllocationWatermark();
4435 }
4436
4437 ASSERT(space == old_pointer_space_ ||
4438 (space == map_space_ &&
4439 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4440
4441 page->SetRegionMarks(IterateDirtyRegions(marks,
4442 start,
4443 end,
4444 visit_dirty_region,
4445 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00004446 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004447
4448 // Mark page watermark as invalid to maintain watermark validity invariant.
4449 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4450 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00004451 }
4452}
4453
4454
Steve Blockd0582a62009-12-15 09:54:21 +00004455void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4456 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00004457 IterateWeakRoots(v, mode);
4458}
4459
4460
4461void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004462 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00004463 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00004464 if (mode != VISIT_ALL_IN_SCAVENGE) {
4465 // Scavenge collections have special processing for this.
Steve Block44f0eee2011-05-26 01:26:41 +01004466 external_string_table_.Iterate(v);
Leon Clarkee46be812010-01-19 14:06:41 +00004467 }
4468 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00004469}
4470
4471
Steve Blockd0582a62009-12-15 09:54:21 +00004472void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004473 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004474 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004475
Iain Merrick75681382010-08-19 15:07:18 +01004476 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004477 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004478
Steve Block44f0eee2011-05-26 01:26:41 +01004479 isolate_->bootstrapper()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004480 v->Synchronize("bootstrapper");
Steve Block44f0eee2011-05-26 01:26:41 +01004481 isolate_->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004482 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004483 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004484 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004485
4486#ifdef ENABLE_DEBUGGER_SUPPORT
Steve Block44f0eee2011-05-26 01:26:41 +01004487 isolate_->debug()->Iterate(v);
Steve Blocka7e24c12009-10-30 11:49:00 +00004488#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004489 v->Synchronize("debug");
Steve Block44f0eee2011-05-26 01:26:41 +01004490 isolate_->compilation_cache()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004491 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004492
4493 // Iterate over local handles in handle scopes.
Steve Block44f0eee2011-05-26 01:26:41 +01004494 isolate_->handle_scope_implementer()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004495 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004496
Leon Clarkee46be812010-01-19 14:06:41 +00004497 // Iterate over the builtin code objects and code stubs in the
4498 // heap. Note that it is not necessary to iterate over code objects
4499 // on scavenge collections.
4500 if (mode != VISIT_ALL_IN_SCAVENGE) {
Steve Block44f0eee2011-05-26 01:26:41 +01004501 isolate_->builtins()->IterateBuiltins(v);
Leon Clarkee46be812010-01-19 14:06:41 +00004502 }
Steve Blockd0582a62009-12-15 09:54:21 +00004503 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004504
4505 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004506 if (mode == VISIT_ONLY_STRONG) {
Steve Block44f0eee2011-05-26 01:26:41 +01004507 isolate_->global_handles()->IterateStrongRoots(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004508 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01004509 isolate_->global_handles()->IterateAllRoots(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004510 }
4511 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004512
4513 // Iterate over pointers being held by inactive threads.
Steve Block44f0eee2011-05-26 01:26:41 +01004514 isolate_->thread_manager()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004515 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004516
4517 // Iterate over the pointers the Serialization/Deserialization code is
4518 // holding.
4519 // During garbage collection this keeps the partial snapshot cache alive.
4520 // During deserialization of the startup snapshot this creates the partial
4521 // snapshot cache and deserializes the objects it refers to. During
4522 // serialization this does nothing, since the partial snapshot cache is
4523 // empty. However the next thing we do is create the partial snapshot,
4524 // filling up the partial snapshot cache with objects it needs as we go.
4525 SerializerDeserializer::Iterate(v);
4526 // We don't do a v->Synchronize call here, because in debug mode that will
4527 // output a flag to the snapshot. However at this point the serializer and
4528 // deserializer are deliberately a little unsynchronized (see above) so the
4529 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004530}
Steve Blocka7e24c12009-10-30 11:49:00 +00004531
4532
Steve Blocka7e24c12009-10-30 11:49:00 +00004533// TODO(1236194): Since the heap size is configurable on the command line
4534// and through the API, we should gracefully handle the case that the heap
4535// size is not big enough to fit all the initial objects.
Russell Brenner90bac252010-11-18 13:33:46 -08004536bool Heap::ConfigureHeap(int max_semispace_size,
4537 int max_old_gen_size,
4538 int max_executable_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004539 if (HasBeenSetup()) return false;
4540
Steve Block3ce2e202009-11-05 08:53:23 +00004541 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4542
4543 if (Snapshot::IsEnabled()) {
4544 // If we are using a snapshot we always reserve the default amount
4545 // of memory for each semispace because code in the snapshot has
4546 // write-barrier code that relies on the size and alignment of new
4547 // space. We therefore cannot use a larger max semispace size
4548 // than the default reserved semispace size.
4549 if (max_semispace_size_ > reserved_semispace_size_) {
4550 max_semispace_size_ = reserved_semispace_size_;
4551 }
4552 } else {
4553 // If we are not using snapshots we reserve space for the actual
4554 // max semispace size.
4555 reserved_semispace_size_ = max_semispace_size_;
4556 }
4557
4558 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Russell Brenner90bac252010-11-18 13:33:46 -08004559 if (max_executable_size > 0) {
4560 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
4561 }
4562
4563 // The max executable size must be less than or equal to the max old
4564 // generation size.
4565 if (max_executable_size_ > max_old_generation_size_) {
4566 max_executable_size_ = max_old_generation_size_;
4567 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004568
4569 // The new space size must be a power of two to support single-bit testing
4570 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00004571 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4572 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4573 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4574 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00004575
4576 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004577 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004578
Steve Block44f0eee2011-05-26 01:26:41 +01004579 configured_ = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004580 return true;
4581}
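
// --- Illustrative note, not part of the original V8 source ---
// A hedged worked example of the rounding above, with made-up inputs
// and ignoring the snapshot clamp: requesting a max_semispace_size of
// 3 MB (not a power of two) rounds the semispace up to 4 MB, so new
// space (two semispaces) occupies 8 MB and external_allocation_limit_
// becomes 40 MB (10 * 4 MB). The requested old generation and
// executable limits are rounded up to whole pages, and the executable
// limit is capped at the old generation limit.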
4582
4583
4584bool Heap::ConfigureHeapDefault() {
Russell Brenner90bac252010-11-18 13:33:46 -08004585 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
4586 FLAG_max_old_space_size * MB,
4587 FLAG_max_executable_size * MB);
Steve Blocka7e24c12009-10-30 11:49:00 +00004588}
4589
4590
Ben Murdochbb769b22010-08-11 14:56:33 +01004591void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Iain Merrick75681382010-08-19 15:07:18 +01004592 *stats->start_marker = HeapStats::kStartMarker;
4593 *stats->end_marker = HeapStats::kEndMarker;
Ben Murdochf87a2032010-10-22 12:50:53 +01004594 *stats->new_space_size = new_space_.SizeAsInt();
4595 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Steve Blockd0582a62009-12-15 09:54:21 +00004596 *stats->old_pointer_space_size = old_pointer_space_->Size();
4597 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4598 *stats->old_data_space_size = old_data_space_->Size();
4599 *stats->old_data_space_capacity = old_data_space_->Capacity();
4600 *stats->code_space_size = code_space_->Size();
4601 *stats->code_space_capacity = code_space_->Capacity();
4602 *stats->map_space_size = map_space_->Size();
4603 *stats->map_space_capacity = map_space_->Capacity();
4604 *stats->cell_space_size = cell_space_->Size();
4605 *stats->cell_space_capacity = cell_space_->Capacity();
4606 *stats->lo_space_size = lo_space_->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01004607 isolate_->global_handles()->RecordStats(stats);
4608 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
Ben Murdochbb769b22010-08-11 14:56:33 +01004609 *stats->memory_allocator_capacity =
Steve Block44f0eee2011-05-26 01:26:41 +01004610 isolate()->memory_allocator()->Size() +
4611 isolate()->memory_allocator()->Available();
Iain Merrick75681382010-08-19 15:07:18 +01004612 *stats->os_error = OS::GetLastError();
Ben Murdochbb769b22010-08-11 14:56:33 +01004614 if (take_snapshot) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01004615 HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
Ben Murdochbb769b22010-08-11 14:56:33 +01004616 for (HeapObject* obj = iterator.next();
4617 obj != NULL;
4618 obj = iterator.next()) {
Ben Murdochbb769b22010-08-11 14:56:33 +01004619 InstanceType type = obj->map()->instance_type();
4620 ASSERT(0 <= type && type <= LAST_TYPE);
4621 stats->objects_per_type[type]++;
4622 stats->size_per_type[type] += obj->Size();
4623 }
4624 }
Steve Blockd0582a62009-12-15 09:54:21 +00004625}
4626
4627
Ben Murdochf87a2032010-10-22 12:50:53 +01004628intptr_t Heap::PromotedSpaceSize() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004629 return old_pointer_space_->Size()
4630 + old_data_space_->Size()
4631 + code_space_->Size()
4632 + map_space_->Size()
4633 + cell_space_->Size()
4634 + lo_space_->Size();
4635}
4636
4637
4638int Heap::PromotedExternalMemorySize() {
4639 if (amount_of_external_allocated_memory_
4640 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4641 return amount_of_external_allocated_memory_
4642 - amount_of_external_allocated_memory_at_last_global_gc_;
4643}
4644
Steve Block44f0eee2011-05-26 01:26:41 +01004645#ifdef DEBUG
4646
4647// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
4648static const int kMarkTag = 2;
4649
4650
4651class HeapDebugUtils {
4652 public:
4653 explicit HeapDebugUtils(Heap* heap)
4654 : search_for_any_global_(false),
4655 search_target_(NULL),
4656 found_target_(false),
4657 object_stack_(20),
4658 heap_(heap) {
4659 }
4660
4661 class MarkObjectVisitor : public ObjectVisitor {
4662 public:
4663 explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4664
4665 void VisitPointers(Object** start, Object** end) {
4666 // Copy all HeapObject pointers in [start, end)
4667 for (Object** p = start; p < end; p++) {
4668 if ((*p)->IsHeapObject())
4669 utils_->MarkObjectRecursively(p);
4670 }
4671 }
4672
4673 HeapDebugUtils* utils_;
4674 };
4675
4676 void MarkObjectRecursively(Object** p) {
4677 if (!(*p)->IsHeapObject()) return;
4678
4679 HeapObject* obj = HeapObject::cast(*p);
4680
4681 Object* map = obj->map();
4682
4683 if (!map->IsHeapObject()) return; // visited before
4684
4685 if (found_target_) return; // stop if target found
4686 object_stack_.Add(obj);
4687 if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
4688 (!search_for_any_global_ && (obj == search_target_))) {
4689 found_target_ = true;
4690 return;
4691 }
4692
4693 // not visited yet
4694 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
4695
4696 Address map_addr = map_p->address();
4697
4698 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
4699
4700 MarkObjectRecursively(&map);
4701
4702 MarkObjectVisitor mark_visitor(this);
4703
4704 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
4705 &mark_visitor);
4706
4707 if (!found_target_) // don't pop if found the target
4708 object_stack_.RemoveLast();
4709 }
4710
4711
4712 class UnmarkObjectVisitor : public ObjectVisitor {
4713 public:
4714 explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4715
4716 void VisitPointers(Object** start, Object** end) {
4717 // Copy all HeapObject pointers in [start, end)
4718 for (Object** p = start; p < end; p++) {
4719 if ((*p)->IsHeapObject())
4720 utils_->UnmarkObjectRecursively(p);
4721 }
4722 }
4723
4724 HeapDebugUtils* utils_;
4725 };
4726
4727
4728 void UnmarkObjectRecursively(Object** p) {
4729 if (!(*p)->IsHeapObject()) return;
4730
4731 HeapObject* obj = HeapObject::cast(*p);
4732
4733 Object* map = obj->map();
4734
4735 if (map->IsHeapObject()) return; // unmarked already
4736
4737 Address map_addr = reinterpret_cast<Address>(map);
4738
4739 map_addr -= kMarkTag;
4740
4741 ASSERT_TAG_ALIGNED(map_addr);
4742
4743 HeapObject* map_p = HeapObject::FromAddress(map_addr);
4744
4745 obj->set_map(reinterpret_cast<Map*>(map_p));
4746
4747 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
4748
4749 UnmarkObjectVisitor unmark_visitor(this);
4750
4751 obj->IterateBody(Map::cast(map_p)->instance_type(),
4752 obj->SizeFromMap(Map::cast(map_p)),
4753 &unmark_visitor);
4754 }
4755
4756
4757 void MarkRootObjectRecursively(Object** root) {
4758 if (search_for_any_global_) {
4759 ASSERT(search_target_ == NULL);
4760 } else {
4761 ASSERT(search_target_->IsHeapObject());
4762 }
4763 found_target_ = false;
4764 object_stack_.Clear();
4765
4766 MarkObjectRecursively(root);
4767 UnmarkObjectRecursively(root);
4768
4769 if (found_target_) {
4770 PrintF("=====================================\n");
4771 PrintF("==== Path to object ====\n");
4772 PrintF("=====================================\n\n");
4773
4774 ASSERT(!object_stack_.is_empty());
4775 for (int i = 0; i < object_stack_.length(); i++) {
4776 if (i > 0) PrintF("\n |\n |\n V\n\n");
4777 Object* obj = object_stack_[i];
4778 obj->Print();
4779 }
4780 PrintF("=====================================\n");
4781 }
4782 }
4783
4784 // Helper class for visiting HeapObjects recursively.
4785 class MarkRootVisitor: public ObjectVisitor {
4786 public:
4787 explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4788
4789 void VisitPointers(Object** start, Object** end) {
4790 // Visit all HeapObject pointers in [start, end)
4791 for (Object** p = start; p < end; p++) {
4792 if ((*p)->IsHeapObject())
4793 utils_->MarkRootObjectRecursively(p);
4794 }
4795 }
4796
4797 HeapDebugUtils* utils_;
4798 };
4799
4800 bool search_for_any_global_;
4801 Object* search_target_;
4802 bool found_target_;
4803 List<Object*> object_stack_;
4804 Heap* heap_;
4805
4806 friend class Heap;
4807};
4808
4809#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00004810
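// Set-up proceeds in a fixed order (summarizing the body below): configure
// the heap if it has not been configured yet, run the one-time GC
// initialization under gc_initializer_mutex, reserve and align the new-space
// chunk, create the paged spaces (old pointer, old data, code, map, cell)
// and the large object space, and finally create the initial maps and
// objects when create_heap_objects is true.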
4811bool Heap::Setup(bool create_heap_objects) {
Steve Block44f0eee2011-05-26 01:26:41 +01004812#ifdef DEBUG
4813 debug_utils_ = new HeapDebugUtils(this);
4814#endif
4815
Steve Blocka7e24c12009-10-30 11:49:00 +00004816 // Initialize heap spaces and initial maps and objects. Whenever something
4817 // goes wrong, just return false. The caller should check the results and
4818 // call Heap::TearDown() to release allocated memory.
4819 //
4820  // If the heap is not yet configured (e.g., through the API), configure it.
4821  // Configuration is based on the flags new-space-size (really the semispace
4822  // size) and old-space-size if they are set, or on the initial values of
4823  // semispace_size_ and old_generation_size_ otherwise.
Steve Block44f0eee2011-05-26 01:26:41 +01004824 if (!configured_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004825 if (!ConfigureHeapDefault()) return false;
4826 }
4827
Steve Block44f0eee2011-05-26 01:26:41 +01004828 gc_initializer_mutex->Lock();
4829 static bool initialized_gc = false;
4830 if (!initialized_gc) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01004831 initialized_gc = true;
4832 InitializeScavengingVisitorsTables();
4833 NewSpaceScavenger::Initialize();
4834 MarkCompactCollector::Initialize();
Steve Block44f0eee2011-05-26 01:26:41 +01004835 }
4836 gc_initializer_mutex->Unlock();
Iain Merrick75681382010-08-19 15:07:18 +01004837
Kristian Monsen80d68ea2010-09-08 11:05:35 +01004838 MarkMapPointersAsEncoded(false);
4839
Steve Blocka7e24c12009-10-30 11:49:00 +00004840  // Set up the memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00004841 // space. The chunk is double the size of the requested reserved
4842 // new space size to ensure that we can find a pair of semispaces that
4843 // are contiguous and aligned to their size.
Steve Block44f0eee2011-05-26 01:26:41 +01004844 if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
4845 return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004846 void* chunk =
Steve Block44f0eee2011-05-26 01:26:41 +01004847 isolate_->memory_allocator()->ReserveInitialChunk(
4848 4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004849 if (chunk == NULL) return false;
4850
4851 // Align the pair of semispaces to their size, which must be a power
4852 // of 2.
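  // For example, with a reserved semispace size S the code below reserves
  // 4 * S bytes and rounds the start up to a 2 * S boundary, which always
  // leaves a contiguous, properly aligned block of 2 * S bytes inside the
  // reserved chunk.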
Steve Blocka7e24c12009-10-30 11:49:00 +00004853 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00004854 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4855 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4856 return false;
4857 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004858
4859 // Initialize old pointer space.
4860 old_pointer_space_ =
Steve Block44f0eee2011-05-26 01:26:41 +01004861 new OldSpace(this,
4862 max_old_generation_size_,
4863 OLD_POINTER_SPACE,
4864 NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004865 if (old_pointer_space_ == NULL) return false;
4866 if (!old_pointer_space_->Setup(NULL, 0)) return false;
4867
4868 // Initialize old data space.
4869 old_data_space_ =
Steve Block44f0eee2011-05-26 01:26:41 +01004870 new OldSpace(this,
4871 max_old_generation_size_,
4872 OLD_DATA_SPACE,
4873 NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004874 if (old_data_space_ == NULL) return false;
4875 if (!old_data_space_->Setup(NULL, 0)) return false;
4876
4877  // Initialize the code space, setting its maximum capacity to the old
4878 // generation size. It needs executable memory.
4879 // On 64-bit platform(s), we put all code objects in a 2 GB range of
4880 // virtual address space, so that they can call each other with near calls.
4881 if (code_range_size_ > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01004882 if (!isolate_->code_range()->Setup(code_range_size_)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004883 return false;
4884 }
4885 }
4886
4887 code_space_ =
Steve Block44f0eee2011-05-26 01:26:41 +01004888 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004889 if (code_space_ == NULL) return false;
4890 if (!code_space_->Setup(NULL, 0)) return false;
4891
4892 // Initialize map space.
Steve Block44f0eee2011-05-26 01:26:41 +01004893 map_space_ = new MapSpace(this, FLAG_use_big_map_space
Leon Clarkee46be812010-01-19 14:06:41 +00004894 ? max_old_generation_size_
Leon Clarked91b9f72010-01-27 17:25:45 +00004895 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
4896 FLAG_max_map_space_pages,
Leon Clarkee46be812010-01-19 14:06:41 +00004897 MAP_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004898 if (map_space_ == NULL) return false;
4899 if (!map_space_->Setup(NULL, 0)) return false;
4900
4901 // Initialize global property cell space.
Steve Block44f0eee2011-05-26 01:26:41 +01004902 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004903 if (cell_space_ == NULL) return false;
4904 if (!cell_space_->Setup(NULL, 0)) return false;
4905
4906  // The large object space may contain code or data. We set the memory
4907 // to be non-executable here for safety, but this means we need to enable it
4908 // explicitly when allocating large code objects.
Steve Block44f0eee2011-05-26 01:26:41 +01004909 lo_space_ = new LargeObjectSpace(this, LO_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004910 if (lo_space_ == NULL) return false;
4911 if (!lo_space_->Setup()) return false;
4912
4913 if (create_heap_objects) {
4914 // Create initial maps.
4915 if (!CreateInitialMaps()) return false;
4916 if (!CreateApiObjects()) return false;
4917
4918    // Create initial objects.
4919 if (!CreateInitialObjects()) return false;
Ben Murdochf87a2032010-10-22 12:50:53 +01004920
4921 global_contexts_list_ = undefined_value();
Steve Blocka7e24c12009-10-30 11:49:00 +00004922 }
4923
Steve Block44f0eee2011-05-26 01:26:41 +01004924 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
4925 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
Steve Blocka7e24c12009-10-30 11:49:00 +00004926
Steve Block3ce2e202009-11-05 08:53:23 +00004927#ifdef ENABLE_LOGGING_AND_PROFILING
4928 // This should be called only after initial objects have been created.
Steve Block44f0eee2011-05-26 01:26:41 +01004929 isolate_->producer_heap_profile()->Setup();
Steve Block3ce2e202009-11-05 08:53:23 +00004930#endif
4931
Steve Blocka7e24c12009-10-30 11:49:00 +00004932 return true;
4933}
4934
4935
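// The stack limits are stored in the root list as pseudo-Smis:
// (address & ~kSmiTagMask) | kSmiTag forces the low bits to carry a Smi tag,
// so root visitors treat the entries as small integers and never dereference
// them as heap pointers.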
Steve Blockd0582a62009-12-15 09:54:21 +00004936void Heap::SetStackLimits() {
Steve Block44f0eee2011-05-26 01:26:41 +01004937 ASSERT(isolate_ != NULL);
4938 ASSERT(isolate_ == isolate());
Steve Blocka7e24c12009-10-30 11:49:00 +00004939  // On 64-bit machines, pointers are generally out of range of Smis. We write
4940  // something that looks like an out-of-range Smi for the GC to ignore.
4941
Steve Blockd0582a62009-12-15 09:54:21 +00004942 // Set up the special root array entries containing the stack limits.
4943  // These are actually addresses, but the tag makes the GC ignore them.
Steve Blocka7e24c12009-10-30 11:49:00 +00004944 roots_[kStackLimitRootIndex] =
Steve Blockd0582a62009-12-15 09:54:21 +00004945 reinterpret_cast<Object*>(
Steve Block44f0eee2011-05-26 01:26:41 +01004946 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blockd0582a62009-12-15 09:54:21 +00004947 roots_[kRealStackLimitRootIndex] =
4948 reinterpret_cast<Object*>(
Steve Block44f0eee2011-05-26 01:26:41 +01004949 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blocka7e24c12009-10-30 11:49:00 +00004950}
4951
4952
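// Tear-down runs roughly in reverse order of Setup(): optional cumulative GC
// statistics are printed, global handles and the external string table are
// released, each space is torn down and deleted, and finally the memory
// allocator itself is shut down.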
4953void Heap::TearDown() {
Leon Clarkef7060e22010-06-03 12:02:55 +01004954 if (FLAG_print_cumulative_gc_stat) {
4955 PrintF("\n\n");
4956 PrintF("gc_count=%d ", gc_count_);
4957 PrintF("mark_sweep_count=%d ", ms_count_);
4958 PrintF("mark_compact_count=%d ", mc_count_);
Steve Block44f0eee2011-05-26 01:26:41 +01004959 PrintF("max_gc_pause=%d ", get_max_gc_pause());
4960 PrintF("min_in_mutator=%d ", get_min_in_mutator());
Ben Murdochf87a2032010-10-22 12:50:53 +01004961 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
Steve Block44f0eee2011-05-26 01:26:41 +01004962 get_max_alive_after_gc());
Leon Clarkef7060e22010-06-03 12:02:55 +01004963 PrintF("\n\n");
4964 }
4965
Steve Block44f0eee2011-05-26 01:26:41 +01004966 isolate_->global_handles()->TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +00004967
Steve Block44f0eee2011-05-26 01:26:41 +01004968 external_string_table_.TearDown();
Leon Clarkee46be812010-01-19 14:06:41 +00004969
Steve Blocka7e24c12009-10-30 11:49:00 +00004970 new_space_.TearDown();
4971
4972 if (old_pointer_space_ != NULL) {
4973 old_pointer_space_->TearDown();
4974 delete old_pointer_space_;
4975 old_pointer_space_ = NULL;
4976 }
4977
4978 if (old_data_space_ != NULL) {
4979 old_data_space_->TearDown();
4980 delete old_data_space_;
4981 old_data_space_ = NULL;
4982 }
4983
4984 if (code_space_ != NULL) {
4985 code_space_->TearDown();
4986 delete code_space_;
4987 code_space_ = NULL;
4988 }
4989
4990 if (map_space_ != NULL) {
4991 map_space_->TearDown();
4992 delete map_space_;
4993 map_space_ = NULL;
4994 }
4995
4996 if (cell_space_ != NULL) {
4997 cell_space_->TearDown();
4998 delete cell_space_;
4999 cell_space_ = NULL;
5000 }
5001
5002 if (lo_space_ != NULL) {
5003 lo_space_->TearDown();
5004 delete lo_space_;
5005 lo_space_ = NULL;
5006 }
5007
Steve Block44f0eee2011-05-26 01:26:41 +01005008 isolate_->memory_allocator()->TearDown();
5009
5010#ifdef DEBUG
5011 delete debug_utils_;
5012 debug_utils_ = NULL;
5013#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005014}
5015
5016
5017void Heap::Shrink() {
5018 // Try to shrink all paged spaces.
5019 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00005020 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
5021 space->Shrink();
Steve Blocka7e24c12009-10-30 11:49:00 +00005022}
5023
5024
5025#ifdef ENABLE_HEAP_PROTECTION
5026
5027void Heap::Protect() {
5028 if (HasBeenSetup()) {
5029 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00005030 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5031 space->Protect();
Steve Blocka7e24c12009-10-30 11:49:00 +00005032 }
5033}
5034
5035
5036void Heap::Unprotect() {
5037 if (HasBeenSetup()) {
5038 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00005039 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5040 space->Unprotect();
Steve Blocka7e24c12009-10-30 11:49:00 +00005041 }
5042}
5043
5044#endif
5045
5046
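// GC prologue/epilogue callback registration keeps (callback, gc_type) pairs
// in a simple list: adding the same callback twice is caught by an ASSERT,
// and removing a callback that was never added hits UNREACHABLE().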
Steve Block6ded16b2010-05-10 14:33:55 +01005047void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
5048 ASSERT(callback != NULL);
5049 GCPrologueCallbackPair pair(callback, gc_type);
5050 ASSERT(!gc_prologue_callbacks_.Contains(pair));
5051  gc_prologue_callbacks_.Add(pair);
5052}
5053
5054
5055void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
5056 ASSERT(callback != NULL);
5057 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5058 if (gc_prologue_callbacks_[i].callback == callback) {
5059 gc_prologue_callbacks_.Remove(i);
5060 return;
5061 }
5062 }
5063 UNREACHABLE();
5064}
5065
5066
5067void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
5068 ASSERT(callback != NULL);
5069 GCEpilogueCallbackPair pair(callback, gc_type);
5070 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
5071  gc_epilogue_callbacks_.Add(pair);
5072}
5073
5074
5075void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
5076 ASSERT(callback != NULL);
5077 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5078 if (gc_epilogue_callbacks_[i].callback == callback) {
5079 gc_epilogue_callbacks_.Remove(i);
5080 return;
5081 }
5082 }
5083 UNREACHABLE();
5084}
5085
5086
Steve Blocka7e24c12009-10-30 11:49:00 +00005087#ifdef DEBUG
5088
5089class PrintHandleVisitor: public ObjectVisitor {
5090 public:
5091 void VisitPointers(Object** start, Object** end) {
5092 for (Object** p = start; p < end; p++)
Ben Murdochf87a2032010-10-22 12:50:53 +01005093 PrintF(" handle %p to %p\n",
5094 reinterpret_cast<void*>(p),
5095 reinterpret_cast<void*>(*p));
Steve Blocka7e24c12009-10-30 11:49:00 +00005096 }
5097};
5098
5099void Heap::PrintHandles() {
5100 PrintF("Handles:\n");
5101 PrintHandleVisitor v;
Steve Block44f0eee2011-05-26 01:26:41 +01005102 isolate_->handle_scope_implementer()->Iterate(&v);
Steve Blocka7e24c12009-10-30 11:49:00 +00005103}
5104
5105#endif
5106
5107
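// The space iterators below hand out each space exactly once, in the order
// of the AllocationSpace enum, and return NULL when exhausted.  Their
// counters are assumed to be initialized to the first space they should
// return (the constructors are not part of this file).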
5108Space* AllSpaces::next() {
5109 switch (counter_++) {
5110 case NEW_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005111 return HEAP->new_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005112 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005113 return HEAP->old_pointer_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005114 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005115 return HEAP->old_data_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005116 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005117 return HEAP->code_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005118 case MAP_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005119 return HEAP->map_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005120 case CELL_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005121 return HEAP->cell_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005122 case LO_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005123 return HEAP->lo_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005124 default:
5125 return NULL;
5126 }
5127}
5128
5129
5130PagedSpace* PagedSpaces::next() {
5131 switch (counter_++) {
5132 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005133 return HEAP->old_pointer_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005134 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005135 return HEAP->old_data_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005136 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005137 return HEAP->code_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005138 case MAP_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005139 return HEAP->map_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005140 case CELL_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005141 return HEAP->cell_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005142 default:
5143 return NULL;
5144 }
5145}
5146
5147
5148
5149OldSpace* OldSpaces::next() {
5150 switch (counter_++) {
5151 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005152 return HEAP->old_pointer_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005153 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005154 return HEAP->old_data_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005155 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005156 return HEAP->code_space();
Steve Blocka7e24c12009-10-30 11:49:00 +00005157 default:
5158 return NULL;
5159 }
5160}
5161
5162
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005163SpaceIterator::SpaceIterator()
5164 : current_space_(FIRST_SPACE),
5165 iterator_(NULL),
5166 size_func_(NULL) {
5167}
5168
5169
5170SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
5171 : current_space_(FIRST_SPACE),
5172 iterator_(NULL),
5173 size_func_(size_func) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005174}
5175
5176
5177SpaceIterator::~SpaceIterator() {
5178 // Delete active iterator if any.
5179 delete iterator_;
5180}
5181
5182
5183bool SpaceIterator::has_next() {
5184 // Iterate until no more spaces.
5185 return current_space_ != LAST_SPACE;
5186}
5187
5188
5189ObjectIterator* SpaceIterator::next() {
5190 if (iterator_ != NULL) {
5191 delete iterator_;
5192 iterator_ = NULL;
5193    // Move to the next space.
5194 current_space_++;
5195 if (current_space_ > LAST_SPACE) {
5196 return NULL;
5197 }
5198 }
5199
5200 // Return iterator for the new current space.
5201 return CreateIterator();
5202}
5203
5204
5205// Create an iterator for the current space.
5206ObjectIterator* SpaceIterator::CreateIterator() {
5207 ASSERT(iterator_ == NULL);
5208
5209 switch (current_space_) {
5210 case NEW_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005211 iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005212 break;
5213 case OLD_POINTER_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005214 iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005215 break;
5216 case OLD_DATA_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005217 iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005218 break;
5219 case CODE_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005220 iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005221 break;
5222 case MAP_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005223 iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005224 break;
5225 case CELL_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005226 iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005227 break;
5228 case LO_SPACE:
Steve Block44f0eee2011-05-26 01:26:41 +01005229 iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00005230 break;
5231 }
5232
5233  // Return the newly allocated iterator.
5234 ASSERT(iterator_ != NULL);
5235 return iterator_;
5236}
5237
5238
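// Object filters used by HeapIterator.  SkipObject() returns true for
// objects the iteration should not report.  The concrete filters below
// borrow the object mark bit and clear it lazily as objects are skipped, so
// a filtered iteration is expected to run to completion (see the assert in
// HeapIterator::Shutdown).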
Ben Murdochb0fe1622011-05-05 13:52:32 +01005239class HeapObjectsFilter {
5240 public:
5241 virtual ~HeapObjectsFilter() {}
5242 virtual bool SkipObject(HeapObject* object) = 0;
5243};
5244
5245
5246class FreeListNodesFilter : public HeapObjectsFilter {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005247 public:
5248 FreeListNodesFilter() {
5249 MarkFreeListNodes();
5250 }
5251
Ben Murdochb0fe1622011-05-05 13:52:32 +01005252 bool SkipObject(HeapObject* object) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005253 if (object->IsMarked()) {
5254 object->ClearMark();
5255 return true;
5256 } else {
5257 return false;
5258 }
5259 }
5260
5261 private:
5262 void MarkFreeListNodes() {
Steve Block44f0eee2011-05-26 01:26:41 +01005263 Heap* heap = HEAP;
5264 heap->old_pointer_space()->MarkFreeListNodes();
5265 heap->old_data_space()->MarkFreeListNodes();
5266 MarkCodeSpaceFreeListNodes(heap);
5267 heap->map_space()->MarkFreeListNodes();
5268 heap->cell_space()->MarkFreeListNodes();
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005269 }
5270
Steve Block44f0eee2011-05-26 01:26:41 +01005271 void MarkCodeSpaceFreeListNodes(Heap* heap) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005272 // For code space, using FreeListNode::IsFreeListNode is OK.
Steve Block44f0eee2011-05-26 01:26:41 +01005273 HeapObjectIterator iter(heap->code_space());
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005274 for (HeapObject* obj = iter.next_object();
5275 obj != NULL;
5276 obj = iter.next_object()) {
5277 if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
5278 }
5279 }
5280
5281 AssertNoAllocation no_alloc;
5282};
5283
5284
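// Finds unreachable objects by first marking every object in the heap and
// then clearing the mark from everything transitively reachable from the
// roots; objects that are still marked afterwards are unreachable and are
// skipped (SkipObject clears the mark as a side effect).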
Ben Murdochb0fe1622011-05-05 13:52:32 +01005285class UnreachableObjectsFilter : public HeapObjectsFilter {
5286 public:
5287 UnreachableObjectsFilter() {
5288 MarkUnreachableObjects();
5289 }
5290
5291 bool SkipObject(HeapObject* object) {
5292 if (object->IsMarked()) {
5293 object->ClearMark();
5294 return true;
5295 } else {
5296 return false;
5297 }
5298 }
5299
5300 private:
5301 class UnmarkingVisitor : public ObjectVisitor {
5302 public:
5303 UnmarkingVisitor() : list_(10) {}
5304
5305 void VisitPointers(Object** start, Object** end) {
5306 for (Object** p = start; p < end; p++) {
5307 if (!(*p)->IsHeapObject()) continue;
5308 HeapObject* obj = HeapObject::cast(*p);
5309 if (obj->IsMarked()) {
5310 obj->ClearMark();
5311 list_.Add(obj);
5312 }
5313 }
5314 }
5315
5316 bool can_process() { return !list_.is_empty(); }
5317
5318 void ProcessNext() {
5319 HeapObject* obj = list_.RemoveLast();
5320 obj->Iterate(this);
5321 }
5322
5323 private:
5324 List<HeapObject*> list_;
5325 };
5326
5327 void MarkUnreachableObjects() {
5328 HeapIterator iterator;
5329 for (HeapObject* obj = iterator.next();
5330 obj != NULL;
5331 obj = iterator.next()) {
5332 obj->SetMark();
5333 }
5334 UnmarkingVisitor visitor;
Steve Block44f0eee2011-05-26 01:26:41 +01005335 HEAP->IterateRoots(&visitor, VISIT_ALL);
Ben Murdochb0fe1622011-05-05 13:52:32 +01005336 while (visitor.can_process())
5337 visitor.ProcessNext();
5338 }
5339
5340 AssertNoAllocation no_alloc;
5341};
5342
5343
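// HeapIterator walks every object in every space.  When a filtering mode is
// requested, objects are sized with MarkCompactCollector::SizeOfMarkedObject
// (presumably because the filters temporarily set the mark bit) and objects
// rejected by the filter are skipped by next().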
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005344HeapIterator::HeapIterator()
5345 : filtering_(HeapIterator::kNoFiltering),
5346 filter_(NULL) {
5347 Init();
5348}
5349
5350
Ben Murdochb0fe1622011-05-05 13:52:32 +01005351HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005352 : filtering_(filtering),
5353 filter_(NULL) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005354 Init();
5355}
5356
5357
5358HeapIterator::~HeapIterator() {
5359 Shutdown();
5360}
5361
5362
5363void HeapIterator::Init() {
5364 // Start the iteration.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005365 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
5366 new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
5367 switch (filtering_) {
5368 case kFilterFreeListNodes:
5369 filter_ = new FreeListNodesFilter;
5370 break;
5371 case kFilterUnreachable:
5372 filter_ = new UnreachableObjectsFilter;
5373 break;
5374 default:
5375 break;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005376 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005377 object_iterator_ = space_iterator_->next();
5378}
5379
5380
5381void HeapIterator::Shutdown() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005382#ifdef DEBUG
Ben Murdochb0fe1622011-05-05 13:52:32 +01005383 // Assert that in filtering mode we have iterated through all
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005384  // objects. Otherwise, the heap will be left in an inconsistent state.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005385 if (filtering_ != kNoFiltering) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005386 ASSERT(object_iterator_ == NULL);
5387 }
5388#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005389 // Make sure the last iterator is deallocated.
5390 delete space_iterator_;
5391 space_iterator_ = NULL;
5392 object_iterator_ = NULL;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005393 delete filter_;
5394 filter_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005395}
5396
5397
Leon Clarked91b9f72010-01-27 17:25:45 +00005398HeapObject* HeapIterator::next() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005399 if (filter_ == NULL) return NextObject();
5400
5401 HeapObject* obj = NextObject();
Ben Murdochb0fe1622011-05-05 13:52:32 +01005402 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005403 return obj;
5404}
5405
5406
5407HeapObject* HeapIterator::NextObject() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005408 // No iterator means we are done.
Leon Clarked91b9f72010-01-27 17:25:45 +00005409 if (object_iterator_ == NULL) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005410
Leon Clarked91b9f72010-01-27 17:25:45 +00005411 if (HeapObject* obj = object_iterator_->next_object()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005412 // If the current iterator has more objects we are fine.
Leon Clarked91b9f72010-01-27 17:25:45 +00005413 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005414 } else {
5415    // Go through the spaces looking for one that has objects.
5416 while (space_iterator_->has_next()) {
5417 object_iterator_ = space_iterator_->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00005418 if (HeapObject* obj = object_iterator_->next_object()) {
5419 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005420 }
5421 }
5422 }
5423 // Done with the last space.
5424 object_iterator_ = NULL;
Leon Clarked91b9f72010-01-27 17:25:45 +00005425 return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005426}
5427
5428
5429void HeapIterator::reset() {
5430 // Restart the iterator.
5431 Shutdown();
5432 Init();
5433}
5434
5435
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005436#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
Steve Blocka7e24c12009-10-30 11:49:00 +00005437
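// PathTracer performs a depth-first search from the pointers it visits.
// Objects are "marked" by tagging their map word (map address + kMarkTag)
// and are restored by a second, unmarking pass over the same graph; the
// object_stack_ then records the chain of objects leading to the target.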
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005438Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00005439
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005440class PathTracer::MarkVisitor: public ObjectVisitor {
Steve Blocka7e24c12009-10-30 11:49:00 +00005441 public:
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005442 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00005443 void VisitPointers(Object** start, Object** end) {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005444 // Scan all HeapObject pointers in [start, end)
5445 for (Object** p = start; !tracer_->found() && (p < end); p++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005446 if ((*p)->IsHeapObject())
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005447 tracer_->MarkRecursively(p, this);
Steve Blocka7e24c12009-10-30 11:49:00 +00005448 }
5449 }
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005450
5451 private:
5452 PathTracer* tracer_;
Steve Blocka7e24c12009-10-30 11:49:00 +00005453};
5454
Steve Blocka7e24c12009-10-30 11:49:00 +00005455
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005456class PathTracer::UnmarkVisitor: public ObjectVisitor {
5457 public:
5458 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5459 void VisitPointers(Object** start, Object** end) {
5460 // Scan all HeapObject pointers in [start, end)
5461 for (Object** p = start; p < end; p++) {
5462 if ((*p)->IsHeapObject())
5463 tracer_->UnmarkRecursively(p, this);
5464 }
5465 }
5466
5467 private:
5468 PathTracer* tracer_;
5469};
5470
5471
5472void PathTracer::VisitPointers(Object** start, Object** end) {
5473 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5474 // Visit all HeapObject pointers in [start, end)
5475 for (Object** p = start; !done && (p < end); p++) {
5476 if ((*p)->IsHeapObject()) {
5477 TracePathFrom(p);
5478 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5479 }
5480 }
5481}
5482
5483
5484void PathTracer::Reset() {
5485 found_target_ = false;
5486 object_stack_.Clear();
5487}
5488
5489
5490void PathTracer::TracePathFrom(Object** root) {
5491 ASSERT((search_target_ == kAnyGlobalObject) ||
5492 search_target_->IsHeapObject());
5493 found_target_in_trace_ = false;
5494 object_stack_.Clear();
5495
5496 MarkVisitor mark_visitor(this);
5497 MarkRecursively(root, &mark_visitor);
5498
5499 UnmarkVisitor unmark_visitor(this);
5500 UnmarkRecursively(root, &unmark_visitor);
5501
5502 ProcessResults();
5503}
5504
5505
5506void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005507 if (!(*p)->IsHeapObject()) return;
5508
5509 HeapObject* obj = HeapObject::cast(*p);
5510
5511 Object* map = obj->map();
5512
5513 if (!map->IsHeapObject()) return; // visited before
5514
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005515 if (found_target_in_trace_) return; // stop if target found
5516 object_stack_.Add(obj);
5517 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5518 (obj == search_target_)) {
5519 found_target_in_trace_ = true;
5520 found_target_ = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00005521 return;
5522 }
5523
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005524 bool is_global_context = obj->IsGlobalContext();
5525
Steve Blocka7e24c12009-10-30 11:49:00 +00005526 // not visited yet
5527 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5528
5529 Address map_addr = map_p->address();
5530
5531 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
5532
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005533 // Scan the object body.
5534 if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5535 // This is specialized to scan Context's properly.
5536 Object** start = reinterpret_cast<Object**>(obj->address() +
5537 Context::kHeaderSize);
5538 Object** end = reinterpret_cast<Object**>(obj->address() +
5539 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
5540 mark_visitor->VisitPointers(start, end);
5541 } else {
5542 obj->IterateBody(map_p->instance_type(),
5543 obj->SizeFromMap(map_p),
5544 mark_visitor);
5545 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005546
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005547 // Scan the map after the body because the body is a lot more interesting
5548 // when doing leak detection.
5549 MarkRecursively(&map, mark_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00005550
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005551 if (!found_target_in_trace_) // don't pop if found the target
5552 object_stack_.RemoveLast();
Steve Blocka7e24c12009-10-30 11:49:00 +00005553}
5554
5555
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005556void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005557 if (!(*p)->IsHeapObject()) return;
5558
5559 HeapObject* obj = HeapObject::cast(*p);
5560
5561 Object* map = obj->map();
5562
5563 if (map->IsHeapObject()) return; // unmarked already
5564
5565 Address map_addr = reinterpret_cast<Address>(map);
5566
5567 map_addr -= kMarkTag;
5568
5569 ASSERT_TAG_ALIGNED(map_addr);
5570
5571 HeapObject* map_p = HeapObject::FromAddress(map_addr);
5572
5573 obj->set_map(reinterpret_cast<Map*>(map_p));
5574
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005575 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00005576
5577 obj->IterateBody(Map::cast(map_p)->instance_type(),
5578 obj->SizeFromMap(Map::cast(map_p)),
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005579 unmark_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00005580}
5581
5582
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005583void PathTracer::ProcessResults() {
5584 if (found_target_) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005585 PrintF("=====================================\n");
5586 PrintF("==== Path to object ====\n");
5587 PrintF("=====================================\n\n");
5588
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005589 ASSERT(!object_stack_.is_empty());
5590 for (int i = 0; i < object_stack_.length(); i++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005591 if (i > 0) PrintF("\n |\n |\n V\n\n");
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005592 Object* obj = object_stack_[i];
5593#ifdef OBJECT_PRINT
Steve Blocka7e24c12009-10-30 11:49:00 +00005594 obj->Print();
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005595#else
5596 obj->ShortPrint();
5597#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005598 }
5599 PrintF("=====================================\n");
5600 }
5601}
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005602#endif // DEBUG || LIVE_OBJECT_LIST
Steve Blocka7e24c12009-10-30 11:49:00 +00005603
5604
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005605#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00005606// Triggers a depth-first traversal of reachable objects from roots
5607// and finds a path to a specific heap object and prints it.
Leon Clarkee46be812010-01-19 14:06:41 +00005608void Heap::TracePathToObject(Object* target) {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005609 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5610 IterateRoots(&tracer, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005611}
5612
5613
5614// Triggers a depth-first traversal of reachable objects from roots
5615// and finds a path to any global object and prints it. Useful for
5616// determining the source for leaks of global objects.
5617void Heap::TracePathToGlobal() {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01005618 PathTracer tracer(PathTracer::kAnyGlobalObject,
5619 PathTracer::FIND_ALL,
5620 VISIT_ALL);
5621 IterateRoots(&tracer, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005622}
5623#endif
5624
5625
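// Sums the wasted and free-list bytes of the old spaces; GCTracer reports
// this as holes_size_before/holes_size_after in --trace-gc-nvp output.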
Ben Murdochf87a2032010-10-22 12:50:53 +01005626static intptr_t CountTotalHolesSize() {
5627 intptr_t holes_size = 0;
Leon Clarkef7060e22010-06-03 12:02:55 +01005628 OldSpaces spaces;
5629 for (OldSpace* space = spaces.next();
5630 space != NULL;
5631 space = spaces.next()) {
5632 holes_size += space->Waste() + space->AvailableFree();
5633 }
5634 return holes_size;
5635}
5636
5637
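// GCTracer measures a single collection: the constructor samples the heap
// size and timestamps, and the destructor prints either the short one-line
// trace (--trace-gc) or the name=value form (--trace-gc-nvp) and updates the
// cumulative statistics used by --print-cumulative-gc-stat.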
Steve Block44f0eee2011-05-26 01:26:41 +01005638GCTracer::GCTracer(Heap* heap)
Steve Blocka7e24c12009-10-30 11:49:00 +00005639 : start_time_(0.0),
Leon Clarkef7060e22010-06-03 12:02:55 +01005640 start_size_(0),
Steve Blocka7e24c12009-10-30 11:49:00 +00005641 gc_count_(0),
5642 full_gc_count_(0),
5643 is_compacting_(false),
Leon Clarkef7060e22010-06-03 12:02:55 +01005644 marked_count_(0),
5645 allocated_since_last_gc_(0),
5646 spent_in_mutator_(0),
Steve Block44f0eee2011-05-26 01:26:41 +01005647 promoted_objects_size_(0),
5648 heap_(heap) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005649 // These two fields reflect the state of the previous full collection.
5650 // Set them before they are changed by the collector.
Steve Block44f0eee2011-05-26 01:26:41 +01005651 previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
5652 previous_marked_count_ =
5653 heap_->mark_compact_collector_.previous_marked_count();
Leon Clarkef7060e22010-06-03 12:02:55 +01005654 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00005655 start_time_ = OS::TimeCurrentMillis();
Steve Block44f0eee2011-05-26 01:26:41 +01005656 start_size_ = heap_->SizeOfObjects();
Leon Clarkef7060e22010-06-03 12:02:55 +01005657
5658 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
5659 scopes_[i] = 0;
5660 }
5661
5662 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
5663
Steve Block44f0eee2011-05-26 01:26:41 +01005664 allocated_since_last_gc_ =
5665 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
Leon Clarkef7060e22010-06-03 12:02:55 +01005666
Steve Block44f0eee2011-05-26 01:26:41 +01005667 if (heap_->last_gc_end_timestamp_ > 0) {
5668 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
Leon Clarkef7060e22010-06-03 12:02:55 +01005669 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005670}
5671
5672
5673GCTracer::~GCTracer() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005674  // Print one trace line only if one of the tracing flags is set.
Leon Clarkef7060e22010-06-03 12:02:55 +01005675 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
5676
Steve Block44f0eee2011-05-26 01:26:41 +01005677 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
Leon Clarkef7060e22010-06-03 12:02:55 +01005678
Steve Block44f0eee2011-05-26 01:26:41 +01005679 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
5680 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
Leon Clarkef7060e22010-06-03 12:02:55 +01005681
Steve Block44f0eee2011-05-26 01:26:41 +01005682 int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005683
5684 // Update cumulative GC statistics if required.
5685 if (FLAG_print_cumulative_gc_stat) {
Steve Block44f0eee2011-05-26 01:26:41 +01005686 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
5687 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
5688 heap_->alive_after_last_gc_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005689 if (!first_gc) {
Steve Block44f0eee2011-05-26 01:26:41 +01005690 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
5691 static_cast<int>(spent_in_mutator_));
Leon Clarkef7060e22010-06-03 12:02:55 +01005692 }
5693 }
5694
5695 if (!FLAG_trace_gc_nvp) {
5696 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
5697
5698 PrintF("%s %.1f -> %.1f MB, ",
5699 CollectorString(),
5700 static_cast<double>(start_size_) / MB,
5701 SizeOfHeapObjects());
5702
5703 if (external_time > 0) PrintF("%d / ", external_time);
5704 PrintF("%d ms.\n", time);
5705 } else {
5706 PrintF("pause=%d ", time);
5707 PrintF("mutator=%d ",
5708 static_cast<int>(spent_in_mutator_));
5709
5710 PrintF("gc=");
5711 switch (collector_) {
5712 case SCAVENGER:
5713 PrintF("s");
5714 break;
5715 case MARK_COMPACTOR:
Steve Block44f0eee2011-05-26 01:26:41 +01005716 PrintF("%s",
5717 heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
Leon Clarkef7060e22010-06-03 12:02:55 +01005718 break;
5719 default:
5720 UNREACHABLE();
5721 }
5722 PrintF(" ");
5723
5724 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
5725 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
5726 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
Iain Merrick75681382010-08-19 15:07:18 +01005727 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
Leon Clarkef7060e22010-06-03 12:02:55 +01005728 PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
5729
Ben Murdochf87a2032010-10-22 12:50:53 +01005730 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
Steve Block44f0eee2011-05-26 01:26:41 +01005731 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
Ben Murdochf87a2032010-10-22 12:50:53 +01005732 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
5733 in_free_list_or_wasted_before_gc_);
5734 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
Leon Clarkef7060e22010-06-03 12:02:55 +01005735
Ben Murdochf87a2032010-10-22 12:50:53 +01005736 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
5737 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005738
5739 PrintF("\n");
5740 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005741
5742#if defined(ENABLE_LOGGING_AND_PROFILING)
Steve Block44f0eee2011-05-26 01:26:41 +01005743 heap_->PrintShortHeapStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00005744#endif
5745}
5746
5747
5748const char* GCTracer::CollectorString() {
5749 switch (collector_) {
5750 case SCAVENGER:
5751 return "Scavenge";
5752 case MARK_COMPACTOR:
Steve Block44f0eee2011-05-26 01:26:41 +01005753 return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
5754 : "Mark-sweep";
Steve Blocka7e24c12009-10-30 11:49:00 +00005755 }
5756 return "Unknown GC";
5757}
5758
5759
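// The keyed lookup cache maps a (map, property name) pair to a field offset.
// Hash() mixes the map pointer (shifted right by kMapHashShift) with the
// name's hash and masks the result to the table capacity; Update() only
// inserts symbol names, and Lookup() returns kNotFound on a miss.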
5760int KeyedLookupCache::Hash(Map* map, String* name) {
5761 // Uses only lower 32 bits if pointers are larger.
5762 uintptr_t addr_hash =
Leon Clarkee46be812010-01-19 14:06:41 +00005763 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
Andrei Popescu402d9372010-02-26 13:31:12 +00005764 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
Steve Blocka7e24c12009-10-30 11:49:00 +00005765}
5766
5767
5768int KeyedLookupCache::Lookup(Map* map, String* name) {
5769 int index = Hash(map, name);
5770 Key& key = keys_[index];
5771 if ((key.map == map) && key.name->Equals(name)) {
5772 return field_offsets_[index];
5773 }
Steve Block44f0eee2011-05-26 01:26:41 +01005774 return kNotFound;
Steve Blocka7e24c12009-10-30 11:49:00 +00005775}
5776
5777
5778void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
5779 String* symbol;
Steve Block44f0eee2011-05-26 01:26:41 +01005780 if (HEAP->LookupSymbolIfExists(name, &symbol)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005781 int index = Hash(map, symbol);
5782 Key& key = keys_[index];
5783 key.map = map;
5784 key.name = symbol;
5785 field_offsets_[index] = field_offset;
5786 }
5787}
5788
5789
5790void KeyedLookupCache::Clear() {
5791 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
5792}
5793
5794
Steve Blocka7e24c12009-10-30 11:49:00 +00005795void DescriptorLookupCache::Clear() {
5796 for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
5797}
5798
5799
Steve Blocka7e24c12009-10-30 11:49:00 +00005800#ifdef DEBUG
Ben Murdochf87a2032010-10-22 12:50:53 +01005801void Heap::GarbageCollectionGreedyCheck() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005802 ASSERT(FLAG_gc_greedy);
Steve Block44f0eee2011-05-26 01:26:41 +01005803 if (isolate_->bootstrapper()->IsActive()) return;
Ben Murdochf87a2032010-10-22 12:50:53 +01005804 if (disallow_allocation_failure()) return;
5805 CollectGarbage(NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005806}
5807#endif
5808
5809
Steve Block44f0eee2011-05-26 01:26:41 +01005810TranscendentalCache::SubCache::SubCache(Type t)
5811 : type_(t),
5812 isolate_(Isolate::Current()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005813 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
5814 uint32_t in1 = 0xffffffffu; // generated by the FPU.
5815 for (int i = 0; i < kCacheSize; i++) {
5816 elements_[i].in[0] = in0;
5817 elements_[i].in[1] = in1;
5818 elements_[i].output = NULL;
5819 }
5820}
5821
5822
Steve Blocka7e24c12009-10-30 11:49:00 +00005823void TranscendentalCache::Clear() {
5824 for (int i = 0; i < kNumberOfCaches; i++) {
5825 if (caches_[i] != NULL) {
5826 delete caches_[i];
5827 caches_[i] = NULL;
5828 }
5829 }
5830}
5831
5832
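// CleanUp() compacts both string lists in place: entries that were
// overwritten with the null value are dropped, strings still in new space
// stay in the new-space list, and strings that have been promoted move to
// the old-space list.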
Leon Clarkee46be812010-01-19 14:06:41 +00005833void ExternalStringTable::CleanUp() {
5834 int last = 0;
5835 for (int i = 0; i < new_space_strings_.length(); ++i) {
Steve Block44f0eee2011-05-26 01:26:41 +01005836 if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
5837 if (heap_->InNewSpace(new_space_strings_[i])) {
Leon Clarkee46be812010-01-19 14:06:41 +00005838 new_space_strings_[last++] = new_space_strings_[i];
5839 } else {
5840 old_space_strings_.Add(new_space_strings_[i]);
5841 }
5842 }
5843 new_space_strings_.Rewind(last);
5844 last = 0;
5845 for (int i = 0; i < old_space_strings_.length(); ++i) {
Steve Block44f0eee2011-05-26 01:26:41 +01005846 if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
5847 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
Leon Clarkee46be812010-01-19 14:06:41 +00005848 old_space_strings_[last++] = old_space_strings_[i];
5849 }
5850 old_space_strings_.Rewind(last);
5851 Verify();
5852}
5853
5854
5855void ExternalStringTable::TearDown() {
5856 new_space_strings_.Free();
5857 old_space_strings_.Free();
5858}
5859
5860
Steve Blocka7e24c12009-10-30 11:49:00 +00005861} } // namespace v8::internal