// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scanner-base.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {


static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;


static Mutex* gc_initializer_mutex = OS::CreateMutex();


Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
      reserved_semispace_size_(2*MB),
      max_semispace_size_(2*MB),
      initial_semispace_size_(128*KB),
      max_old_generation_size_(192*MB),
      max_executable_size_(max_old_generation_size_),
      code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
      reserved_semispace_size_(16*MB),
      max_semispace_size_(16*MB),
      initial_semispace_size_(1*MB),
      max_old_generation_size_(1*GB),
      max_executable_size_(256*MB),
      code_range_size_(512*MB),
#else
      reserved_semispace_size_(8*MB),
      max_semispace_size_(8*MB),
      initial_semispace_size_(512*KB),
      max_old_generation_size_(512*MB),
      max_executable_size_(128*MB),
      code_range_size_(0),
#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      mc_count_(0),
      ms_count_(0),
      gc_count_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_allowed_(true),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
      debug_utils_(NULL),
#endif  // DEBUG
      old_gen_promotion_limit_(kMinimumPromotionLimit),
      old_gen_allocation_limit_(kMinimumAllocationLimit),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      hidden_symbol_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_gc_pause_(0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      configured_(false),
      is_safe_to_read_maps_(true) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
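  // For example (a hypothetical build setting, not one of the defaults): a
  // snapshot build could pass -DV8_MAX_SEMISPACE_SIZE=2097152 so that both
  // reserved_semispace_size_ and max_semispace_size_ are pinned at 2 MB and
  // the snapshot is created and later deserialized with identical young
  // generation limits.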

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  global_contexts_list_ = NULL;
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}

intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}


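// Variant of GcSafeSizeOfOldObject for the phase in which map pointers are
// encoded (see the asserts below): the first word of a freed region holds
// either kSingleFreeEncoding (a one-word hole) or kMultiFreeEncoding (the
// hole size follows in the next word); any other value is an encoded map
// pointer that must be decoded against the map space before the object size
// can be computed.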
int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


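// Summary of the selection policy below, in decreasing priority: an explicit
// request for a non-new space (or --gc-global), the promotion limit being
// reached, exhaustion of the old and large object spaces, and finally too
// little unassigned memory left to absorb a worst-case promotion. Only when
// none of these apply is the cheaper scavenger chosen.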
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d\n",
         isolate_->memory_allocator()->Size(),
         isolate_->memory_allocator()->Available());
  PrintF("New space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  isolate_->transcendental_cache()->Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif

  LiveObjectList::GCPrologue();
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
  LiveObjectList::GCEpilogue();
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->symbol_table_capacity()->Set(
      symbol_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  mark_compact_collector_.SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector()->SetForceCompaction(true);

  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC. Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callbacks invocations.
  // Therefore stop recollecting after several attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  mark_compact_collector()->SetForceCompaction(false);
}


bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(isolate_, GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this);
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue. Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? isolate_->counters()->gc_scavenger()
        : isolate_->counters()->gc_compactor();
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
#endif

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer(this);
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  HEAP->symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation. This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
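    // Worked example with hypothetical numbers: requesting a 64 KB large
    // object plus 16 KB in each of the five paged spaces first doubles the
    // large-object request to 128 KB and then adds the 80 KB reserved above,
    // so the large object space is asked whether the old generation can still
    // grow by 208 KB before this round counts as successful.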
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}



void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

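// Illustration of the arithmetic below (made-up numbers): if 1 MB out of a
// 4 MB new space survived the last scavenge, survival_rate is 25. The trend
// is classified by comparing against the previous rate: a drop of more than
// kYoungSurvivalRateAllowedDeviation gives DECREASING, a rise of more than
// that gives INCREASING, and anything closer counts as STABLE.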
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
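    // For instance (illustrative figures): with 30 MB of promoted space the
    // next mark-sweep is not forced until either another
    // Max(2 MB, 10 MB) = 10 MB has been promoted (limit 40 MB) or another
    // Max(8 MB, 15 MB) = 15 MB has been allocated in the old generation
    // (limit 45 MB).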

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that the mutator is either building or
      // modifying a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  bool is_compacting = mark_compact_collector_.IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  is_safe_to_read_maps_ = false;
  mark_compact_collector_.CollectGarbage();
  is_safe_to_read_maps_ = true;

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(HEAP->code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(HEAP->old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  SwitchScavengingVisitorsTableIfProfilingWasEnabled();

  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark. This guarantees that dirty regions iteration will use a
  // correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation. For to-space
  // objects, we treat the bottom of the to space as a queue. Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects. The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
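  // A rough sketch of the layout described above (a simplification; addresses
  // grow to the right):
  //
  //   ToSpaceLow                                                 ToSpaceHigh
  //   | swept copies | unswept copies |    free    | promoted object addrs |
  //                  ^front   alloc top^
  //
  // The queue of copies grows upward from the bottom of to space while the
  // queue of promoted-object addresses grows downward from the top; the
  // guarantee above is what keeps the two from meeting.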
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue_.Initialize(new_space_.ToSpaceHigh());

  is_safe_to_read_maps_ = false;
  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation. By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &Heap::IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge object reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  LiveObjectList::UpdateReferencesForScavengeGC();
  isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();

  ASSERT(new_space_front == new_space_.top());

  is_safe_to_read_maps_ = true;

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


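// The new-space half of the external string table is rewritten in place:
// strings that stayed in new space are compacted towards the front through
// 'last', strings that were promoted move to the old-string list, and dead
// entries are dropped when the list is shrunk to the surviving prefix.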
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  external_string_table_.Verify();

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space. Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted. Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


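// Helper for ProcessWeakReferences below: walks the weak list of optimized
// JSFunctions hanging off a context, asks the retainer which elements to
// keep, relinks the survivors into a fresh singly linked list and returns its
// head (undefined_value() when nothing survives).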
static Object* ProcessFunctionWeakReferences(Heap* heap,
                                             Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* head = heap->undefined_value();
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (candidate != heap->undefined_value()) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == heap->undefined_value()) {
        // First element in the list.
        head = candidate_function;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(candidate_function);
      }
      // Retained function is new tail.
      tail = candidate_function;
    }
    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(heap->undefined_value());
  }

  return head;
}


void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (candidate != undefined_value()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined_value()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(this,
                            Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      tail = candidate_context;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              this,
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer);
      candidate_context->set_unchecked(this,
                                       Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
    }
    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(this,
                        Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  global_contexts_list_ = head;
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


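// Drains the two scavenge queues set up in Heap::Scavenge(): the queue of
// unswept copies at the bottom of to space and the promotion queue at its
// top. Sweeping an object from either queue may copy or promote further
// objects, so the loop keeps alternating between them until both are empty.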
Leon Clarkee46be812010-01-19 14:06:41 +00001201Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1202 Address new_space_front) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001203 do {
1204 ASSERT(new_space_front <= new_space_.top());
1205
1206 // The addresses new_space_front and new_space_.top() define a
1207 // queue of unprocessed copied objects. Process them until the
1208 // queue is empty.
1209 while (new_space_front < new_space_.top()) {
1210 HeapObject* object = HeapObject::FromAddress(new_space_front);
Iain Merrick75681382010-08-19 15:07:18 +01001211 new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001212 }
1213
1214 // Promote and process all the to-be-promoted objects.
Steve Block44f0eee2011-05-26 01:26:41 +01001215 while (!promotion_queue_.is_empty()) {
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001216 HeapObject* target;
1217 int size;
Steve Block44f0eee2011-05-26 01:26:41 +01001218 promotion_queue_.remove(&target, &size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001219
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001220 // Promoted object might be already partially visited
1221 // during dirty regions iteration. Thus we search specificly
1222 // for pointers to from semispace instead of looking for pointers
1223 // to new space.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001224 ASSERT(!target->IsMap());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001225 IterateAndMarkPointersToFromSpace(target->address(),
1226 target->address() + size,
1227 &ScavengePointer);
Steve Blocka7e24c12009-10-30 11:49:00 +00001228 }
1229
1230 // Take another spin if there are now unswept objects in new space
1231 // (there are currently no more unswept promoted objects).
1232 } while (new_space_front < new_space_.top());
1233
Leon Clarkee46be812010-01-19 14:06:41 +00001234 return new_space_front;
Steve Blocka7e24c12009-10-30 11:49:00 +00001235}
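// Editor's note (illustrative, not part of the original source): DoScavenge
// is a Cheney-style breadth-first copying loop.  Everything between
// new_space_front and new_space_.top() has been copied but not yet scanned,
// so to-space itself serves as the work queue, and promoted objects are
// drained from a separate promotion queue.  A minimal sketch of the scan
// loop, with hypothetical helper names:
//
//   Address front = to_space_start;
//   while (front < to_space_top) {
//     HeapObject* obj = HeapObject::FromAddress(front);
//     front += ScanBodyAndCopyReferents(obj);  // copying may grow to_space_top
//   }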
1236
1237
Ben Murdoch8b112d22011-06-08 16:22:53 +01001238enum LoggingAndProfiling {
1239 LOGGING_AND_PROFILING_ENABLED,
1240 LOGGING_AND_PROFILING_DISABLED
1241};
1242
1243
1244typedef void (*ScavengingCallback)(Map* map,
1245 HeapObject** slot,
1246 HeapObject* object);
1247
1248
1249static Atomic32 scavenging_visitors_table_mode_;
1250static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
1251
1252
1253INLINE(static void DoScavengeObject(Map* map,
1254 HeapObject** slot,
1255 HeapObject* obj));
1256
1257
1258void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
1259 scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
1260}
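// Editor's note (illustrative, not part of the original source): scavenging
// is dispatched through a table of specialized callbacks indexed by the
// visitor id recorded in each map, so the per-object-type copy routine is a
// single indirect call.  Conceptually (the index derivation is an assumption;
// GetVisitor() encapsulates the actual lookup):
//
//   ScavengingCallback callback = table[map->visitor_id()];
//   callback(map, slot, obj);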
1261
1262
1263template<LoggingAndProfiling logging_and_profiling_mode>
Iain Merrick75681382010-08-19 15:07:18 +01001264class ScavengingVisitor : public StaticVisitorBase {
1265 public:
1266 static void Initialize() {
1267 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1268 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1269 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1270 table_.Register(kVisitByteArray, &EvacuateByteArray);
1271 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
Ben Murdoch8b112d22011-06-08 16:22:53 +01001272
Ben Murdochf87a2032010-10-22 12:50:53 +01001273 table_.Register(kVisitGlobalContext,
1274 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001275 template VisitSpecialized<Context::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001276
1277 table_.Register(kVisitConsString,
1278 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001279 template VisitSpecialized<ConsString::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001280
1281 table_.Register(kVisitSharedFunctionInfo,
1282 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001283 template VisitSpecialized<SharedFunctionInfo::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001284
1285 table_.Register(kVisitJSFunction,
1286 &ObjectEvacuationStrategy<POINTER_OBJECT>::
Ben Murdoch8b112d22011-06-08 16:22:53 +01001287 template VisitSpecialized<JSFunction::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001288
1289 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1290 kVisitDataObject,
1291 kVisitDataObjectGeneric>();
1292
1293 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1294 kVisitJSObject,
1295 kVisitJSObjectGeneric>();
1296
1297 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1298 kVisitStruct,
1299 kVisitStructGeneric>();
1300 }
1301
Ben Murdoch8b112d22011-06-08 16:22:53 +01001302 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1303 return &table_;
Iain Merrick75681382010-08-19 15:07:18 +01001304 }
1305
Iain Merrick75681382010-08-19 15:07:18 +01001306 private:
1307 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1308 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1309
Steve Blocka7e24c12009-10-30 11:49:00 +00001310#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Steve Block44f0eee2011-05-26 01:26:41 +01001311 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
Iain Merrick75681382010-08-19 15:07:18 +01001312 bool should_record = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001313#ifdef DEBUG
Iain Merrick75681382010-08-19 15:07:18 +01001314 should_record = FLAG_heap_stats;
Steve Blocka7e24c12009-10-30 11:49:00 +00001315#endif
1316#ifdef ENABLE_LOGGING_AND_PROFILING
Iain Merrick75681382010-08-19 15:07:18 +01001317 should_record = should_record || FLAG_log_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00001318#endif
Iain Merrick75681382010-08-19 15:07:18 +01001319 if (should_record) {
Steve Block44f0eee2011-05-26 01:26:41 +01001320 if (heap->new_space()->Contains(obj)) {
1321 heap->new_space()->RecordAllocation(obj);
Iain Merrick75681382010-08-19 15:07:18 +01001322 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001323 heap->new_space()->RecordPromotion(obj);
Iain Merrick75681382010-08-19 15:07:18 +01001324 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001325 }
1326 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001327#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1328
Iain Merrick75681382010-08-19 15:07:18 +01001329  // Helper function used by EvacuateObject to copy a source object to an
1330 // allocated target object and update the forwarding pointer in the source
1331 // object. Returns the target object.
Steve Block44f0eee2011-05-26 01:26:41 +01001332 INLINE(static HeapObject* MigrateObject(Heap* heap,
1333 HeapObject* source,
Iain Merrick75681382010-08-19 15:07:18 +01001334 HeapObject* target,
1335 int size)) {
1336 // Copy the content of source to target.
Steve Block44f0eee2011-05-26 01:26:41 +01001337 heap->CopyBlock(target->address(), source->address(), size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001338
Iain Merrick75681382010-08-19 15:07:18 +01001339 // Set the forwarding address.
1340 source->set_map_word(MapWord::FromForwardingAddress(target));
Steve Blocka7e24c12009-10-30 11:49:00 +00001341
Ben Murdoch8b112d22011-06-08 16:22:53 +01001342 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001343#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Ben Murdoch8b112d22011-06-08 16:22:53 +01001344 // Update NewSpace stats if necessary.
1345 RecordCopiedObject(heap, target);
Steve Blocka7e24c12009-10-30 11:49:00 +00001346#endif
Ben Murdoch8b112d22011-06-08 16:22:53 +01001347 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001348#if defined(ENABLE_LOGGING_AND_PROFILING)
Ben Murdoch8b112d22011-06-08 16:22:53 +01001349 Isolate* isolate = heap->isolate();
1350 if (isolate->logger()->is_logging() ||
1351 isolate->cpu_profiler()->is_profiling()) {
1352 if (target->IsSharedFunctionInfo()) {
1353 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1354 source->address(), target->address()));
1355 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001356 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001357#endif
Ben Murdoch8b112d22011-06-08 16:22:53 +01001358 }
1359
Iain Merrick75681382010-08-19 15:07:18 +01001360 return target;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001361 }
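// Editor's note (illustrative, not part of the original source): after the
// raw copy, the old object's map word is overwritten with a forwarding
// pointer, so any later visitor that reaches the stale copy can recover the
// new address instead of copying the object a second time:
//
//   MapWord word = source->map_word();
//   if (word.IsForwardingAddress()) {
//     *slot = word.ToForwardingAddress();   // already evacuated, just redirect
//   } else {
//     // first visit: copy, then set_map_word(MapWord::FromForwardingAddress(...))
//   }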
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001362
1363
Iain Merrick75681382010-08-19 15:07:18 +01001364 template<ObjectContents object_contents, SizeRestriction size_restriction>
1365 static inline void EvacuateObject(Map* map,
1366 HeapObject** slot,
1367 HeapObject* object,
1368 int object_size) {
1369 ASSERT((size_restriction != SMALL) ||
1370 (object_size <= Page::kMaxHeapObjectSize));
1371 ASSERT(object->Size() == object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001372
Steve Block44f0eee2011-05-26 01:26:41 +01001373 Heap* heap = map->heap();
1374 if (heap->ShouldBePromoted(object->address(), object_size)) {
John Reck59135872010-11-02 12:39:01 -07001375 MaybeObject* maybe_result;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001376
Iain Merrick75681382010-08-19 15:07:18 +01001377 if ((size_restriction != SMALL) &&
1378 (object_size > Page::kMaxHeapObjectSize)) {
Steve Block44f0eee2011-05-26 01:26:41 +01001379 maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001380 } else {
Iain Merrick75681382010-08-19 15:07:18 +01001381 if (object_contents == DATA_OBJECT) {
Steve Block44f0eee2011-05-26 01:26:41 +01001382 maybe_result = heap->old_data_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001383 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001384 maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001385 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001386 }
1387
John Reck59135872010-11-02 12:39:01 -07001388 Object* result = NULL; // Initialization to please compiler.
1389 if (maybe_result->ToObject(&result)) {
Iain Merrick75681382010-08-19 15:07:18 +01001390 HeapObject* target = HeapObject::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01001391        *slot = MigrateObject(heap, object, target, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001392
Iain Merrick75681382010-08-19 15:07:18 +01001393 if (object_contents == POINTER_OBJECT) {
Steve Block44f0eee2011-05-26 01:26:41 +01001394 heap->promotion_queue()->insert(target, object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001395 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001396
Steve Block44f0eee2011-05-26 01:26:41 +01001397 heap->tracer()->increment_promoted_objects_size(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001398 return;
1399 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001400 }
John Reck59135872010-11-02 12:39:01 -07001401 Object* result =
Steve Block44f0eee2011-05-26 01:26:41 +01001402 heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
1403 *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001404 return;
1405 }
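// Editor's note (illustrative, not part of the original source):
// EvacuateObject first tries to promote survivors out of new space (large
// objects go to the large-object space, data-only objects to old data space,
// everything else to old pointer space); only promoted pointer objects are
// queued for later re-scanning.  If promotion is not wanted or fails, the
// object is copied within new space.  In outline, with hypothetical helpers:
//
//   if (ShouldBePromoted(addr, size) && TryAllocateInOldSpace(size, &target)) {
//     Migrate(source, target, size);
//     if (has_pointers) promotion_queue.insert(target, size);
//     return;
//   }
//   Migrate(source, AllocateInNewSpace(size), size);  // fallback path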
1406
Iain Merrick75681382010-08-19 15:07:18 +01001407
1408 static inline void EvacuateFixedArray(Map* map,
1409 HeapObject** slot,
1410 HeapObject* object) {
1411 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1412 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1413 slot,
1414 object,
1415 object_size);
1416 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001417
1418
Iain Merrick75681382010-08-19 15:07:18 +01001419 static inline void EvacuateByteArray(Map* map,
1420 HeapObject** slot,
1421 HeapObject* object) {
1422 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1423 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1424 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001425
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001426
Iain Merrick75681382010-08-19 15:07:18 +01001427 static inline void EvacuateSeqAsciiString(Map* map,
1428 HeapObject** slot,
1429 HeapObject* object) {
1430 int object_size = SeqAsciiString::cast(object)->
1431 SeqAsciiStringSize(map->instance_type());
1432 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1433 }
1434
1435
1436 static inline void EvacuateSeqTwoByteString(Map* map,
1437 HeapObject** slot,
1438 HeapObject* object) {
1439 int object_size = SeqTwoByteString::cast(object)->
1440 SeqTwoByteStringSize(map->instance_type());
1441 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1442 }
1443
1444
1445 static inline bool IsShortcutCandidate(int type) {
1446 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1447 }
1448
1449 static inline void EvacuateShortcutCandidate(Map* map,
1450 HeapObject** slot,
1451 HeapObject* object) {
1452 ASSERT(IsShortcutCandidate(map->instance_type()));
1453
Steve Block44f0eee2011-05-26 01:26:41 +01001454 if (ConsString::cast(object)->unchecked_second() ==
1455 map->heap()->empty_string()) {
Iain Merrick75681382010-08-19 15:07:18 +01001456 HeapObject* first =
1457 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1458
1459 *slot = first;
1460
Steve Block44f0eee2011-05-26 01:26:41 +01001461 if (!map->heap()->InNewSpace(first)) {
Iain Merrick75681382010-08-19 15:07:18 +01001462 object->set_map_word(MapWord::FromForwardingAddress(first));
1463 return;
1464 }
1465
1466 MapWord first_word = first->map_word();
1467 if (first_word.IsForwardingAddress()) {
1468 HeapObject* target = first_word.ToForwardingAddress();
1469
1470 *slot = target;
1471 object->set_map_word(MapWord::FromForwardingAddress(target));
1472 return;
1473 }
1474
Ben Murdoch8b112d22011-06-08 16:22:53 +01001475 DoScavengeObject(first->map(), slot, first);
Iain Merrick75681382010-08-19 15:07:18 +01001476 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1477 return;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001478 }
Iain Merrick75681382010-08-19 15:07:18 +01001479
1480 int object_size = ConsString::kSize;
1481 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001482 }
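// Editor's note (illustrative, not part of the original source): the shortcut
// case above flattens cons strings whose second part is the empty string.
// Instead of copying the cons wrapper, the slot is redirected to the first
// component (following an existing forwarding address if that component was
// already evacuated), and the wrapper's map word is set to forward to the
// same target.  Only genuine two-part cons strings fall through to the
// ordinary EvacuateObject path.  In shorthand (Resolve() is hypothetical):
//
//   if (cons->second() == empty_string) {
//     *slot = Resolve(cons->first());     // skip the wrapper entirely
//     cons->set_map_word(MapWord::FromForwardingAddress(*slot));
//   }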
1483
Iain Merrick75681382010-08-19 15:07:18 +01001484 template<ObjectContents object_contents>
1485 class ObjectEvacuationStrategy {
1486 public:
1487 template<int object_size>
1488 static inline void VisitSpecialized(Map* map,
1489 HeapObject** slot,
1490 HeapObject* object) {
1491 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1492 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001493
Iain Merrick75681382010-08-19 15:07:18 +01001494 static inline void Visit(Map* map,
1495 HeapObject** slot,
1496 HeapObject* object) {
1497 int object_size = map->instance_size();
1498 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1499 }
1500 };
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001501
Ben Murdoch8b112d22011-06-08 16:22:53 +01001502 static VisitorDispatchTable<ScavengingCallback> table_;
Iain Merrick75681382010-08-19 15:07:18 +01001503};
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001504
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001505
Ben Murdoch8b112d22011-06-08 16:22:53 +01001506template<LoggingAndProfiling logging_and_profiling_mode>
1507VisitorDispatchTable<ScavengingCallback>
1508 ScavengingVisitor<logging_and_profiling_mode>::table_;
1509
1510
1511static void InitializeScavengingVisitorsTables() {
1512 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
1513 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
1514 scavenging_visitors_table_.CopyFrom(
1515 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
1516 scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
1517}
1518
1519
1520void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
Ben Murdochbea578b2011-06-08 20:04:28 +01001521#ifdef ENABLE_LOGGING_AND_PROFILING
Ben Murdoch8b112d22011-06-08 16:22:53 +01001522 if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
1523 // Table was already updated by some isolate.
1524 return;
1525 }
1526
1527 if (isolate()->logger()->is_logging() ||
1528 isolate()->cpu_profiler()->is_profiling() ||
1529 (isolate()->heap_profiler() != NULL &&
1530 isolate()->heap_profiler()->is_profiling())) {
 1531    // If one of the isolates is doing a scavenge at this moment, it
 1532    // might see this table in an inconsistent state, where some of the
 1533    // callbacks point to
 1534    // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
 1535    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
 1536    // However, this does not lead to any bugs because such an isolate
 1537    // does not have profiling enabled, and any isolate with profiling
 1538    // enabled is guaranteed to see the table in a consistent state.
1539 scavenging_visitors_table_.CopyFrom(
1540 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
1541
1542 // We use Release_Store to prevent reordering of this write before writes
1543 // to the table.
1544 Release_Store(&scavenging_visitors_table_mode_,
1545 LOGGING_AND_PROFILING_ENABLED);
1546 }
Ben Murdochbea578b2011-06-08 20:04:28 +01001547#endif
Ben Murdoch8b112d22011-06-08 16:22:53 +01001548}
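// Editor's note (illustrative, not part of the original source): the switch
// above publishes the logging-enabled dispatch table with a release store on
// scavenging_visitors_table_mode_, so the table writes cannot be reordered
// past the mode flag.  The same publish/consume pattern in portable C++,
// assuming C++11 atomics rather than V8's Atomic32 helpers:
//
//   // publisher
//   FillTable(&table);
//   mode.store(ENABLED, std::memory_order_release);
//
//   // consumer
//   if (mode.load(std::memory_order_acquire) == ENABLED) {
//     UseTable(&table);   // guaranteed to observe the filled table
//   }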
Steve Blocka7e24c12009-10-30 11:49:00 +00001549
1550
1551void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
Steve Block44f0eee2011-05-26 01:26:41 +01001552 ASSERT(HEAP->InFromSpace(object));
Steve Blocka7e24c12009-10-30 11:49:00 +00001553 MapWord first_word = object->map_word();
1554 ASSERT(!first_word.IsForwardingAddress());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001555 Map* map = first_word.ToMap();
Ben Murdoch8b112d22011-06-08 16:22:53 +01001556 DoScavengeObject(map, p, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001557}
1558
1559
John Reck59135872010-11-02 12:39:01 -07001560MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1561 int instance_size) {
1562 Object* result;
1563 { MaybeObject* maybe_result = AllocateRawMap();
1564 if (!maybe_result->ToObject(&result)) return maybe_result;
1565 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001566
1567 // Map::cast cannot be used due to uninitialized map field.
1568 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1569 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1570 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
Steve Block44f0eee2011-05-26 01:26:41 +01001571 reinterpret_cast<Map*>(result)->set_visitor_id(
1572 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001573 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001574 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001575 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001576 reinterpret_cast<Map*>(result)->set_bit_field(0);
1577 reinterpret_cast<Map*>(result)->set_bit_field2(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001578 return result;
1579}
1580
1581
John Reck59135872010-11-02 12:39:01 -07001582MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1583 Object* result;
1584 { MaybeObject* maybe_result = AllocateRawMap();
1585 if (!maybe_result->ToObject(&result)) return maybe_result;
1586 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001587
1588 Map* map = reinterpret_cast<Map*>(result);
1589 map->set_map(meta_map());
1590 map->set_instance_type(instance_type);
Iain Merrick75681382010-08-19 15:07:18 +01001591 map->set_visitor_id(
1592 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001593 map->set_prototype(null_value());
1594 map->set_constructor(null_value());
1595 map->set_instance_size(instance_size);
1596 map->set_inobject_properties(0);
1597 map->set_pre_allocated_property_fields(0);
1598 map->set_instance_descriptors(empty_descriptor_array());
1599 map->set_code_cache(empty_fixed_array());
Steve Block053d10c2011-06-13 19:13:29 +01001600 map->set_prototype_transitions(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001601 map->set_unused_property_fields(0);
1602 map->set_bit_field(0);
Steve Block8defd9f2010-07-08 12:39:36 +01001603 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
Leon Clarkee46be812010-01-19 14:06:41 +00001604
1605 // If the map object is aligned fill the padding area with Smi 0 objects.
1606 if (Map::kPadStart < Map::kSize) {
1607 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1608 0,
1609 Map::kSize - Map::kPadStart);
1610 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001611 return map;
1612}
1613
1614
John Reck59135872010-11-02 12:39:01 -07001615MaybeObject* Heap::AllocateCodeCache() {
1616 Object* result;
1617 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1618 if (!maybe_result->ToObject(&result)) return maybe_result;
1619 }
Steve Block6ded16b2010-05-10 14:33:55 +01001620 CodeCache* code_cache = CodeCache::cast(result);
1621 code_cache->set_default_cache(empty_fixed_array());
1622 code_cache->set_normal_type_cache(undefined_value());
1623 return code_cache;
1624}
1625
1626
Steve Blocka7e24c12009-10-30 11:49:00 +00001627const Heap::StringTypeTable Heap::string_type_table[] = {
1628#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1629 {type, size, k##camel_name##MapRootIndex},
1630 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1631#undef STRING_TYPE_ELEMENT
1632};
1633
1634
1635const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1636#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1637 {contents, k##name##RootIndex},
1638 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1639#undef CONSTANT_SYMBOL_ELEMENT
1640};
1641
1642
1643const Heap::StructTable Heap::struct_table[] = {
1644#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1645 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1646 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1647#undef STRUCT_TABLE_ELEMENT
1648};
1649
1650
1651bool Heap::CreateInitialMaps() {
John Reck59135872010-11-02 12:39:01 -07001652 Object* obj;
1653 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1654 if (!maybe_obj->ToObject(&obj)) return false;
1655 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001656 // Map::cast cannot be used due to uninitialized map field.
1657 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1658 set_meta_map(new_meta_map);
1659 new_meta_map->set_map(new_meta_map);
1660
John Reck59135872010-11-02 12:39:01 -07001661 { MaybeObject* maybe_obj =
1662 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1663 if (!maybe_obj->ToObject(&obj)) return false;
1664 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001665 set_fixed_array_map(Map::cast(obj));
1666
John Reck59135872010-11-02 12:39:01 -07001667 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1668 if (!maybe_obj->ToObject(&obj)) return false;
1669 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001670 set_oddball_map(Map::cast(obj));
1671
Steve Block6ded16b2010-05-10 14:33:55 +01001672 // Allocate the empty array.
John Reck59135872010-11-02 12:39:01 -07001673 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1674 if (!maybe_obj->ToObject(&obj)) return false;
1675 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001676 set_empty_fixed_array(FixedArray::cast(obj));
1677
John Reck59135872010-11-02 12:39:01 -07001678 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1679 if (!maybe_obj->ToObject(&obj)) return false;
1680 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001681 set_null_value(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01001682 Oddball::cast(obj)->set_kind(Oddball::kNull);
Steve Blocka7e24c12009-10-30 11:49:00 +00001683
1684 // Allocate the empty descriptor array.
John Reck59135872010-11-02 12:39:01 -07001685 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1686 if (!maybe_obj->ToObject(&obj)) return false;
1687 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001688 set_empty_descriptor_array(DescriptorArray::cast(obj));
1689
1690 // Fix the instance_descriptors for the existing maps.
1691 meta_map()->set_instance_descriptors(empty_descriptor_array());
1692 meta_map()->set_code_cache(empty_fixed_array());
Steve Block053d10c2011-06-13 19:13:29 +01001693 meta_map()->set_prototype_transitions(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001694
1695 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1696 fixed_array_map()->set_code_cache(empty_fixed_array());
Steve Block053d10c2011-06-13 19:13:29 +01001697 fixed_array_map()->set_prototype_transitions(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001698
1699 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1700 oddball_map()->set_code_cache(empty_fixed_array());
Steve Block053d10c2011-06-13 19:13:29 +01001701 oddball_map()->set_prototype_transitions(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001702
1703 // Fix prototype object for existing maps.
1704 meta_map()->set_prototype(null_value());
1705 meta_map()->set_constructor(null_value());
1706
1707 fixed_array_map()->set_prototype(null_value());
1708 fixed_array_map()->set_constructor(null_value());
1709
1710 oddball_map()->set_prototype(null_value());
1711 oddball_map()->set_constructor(null_value());
1712
John Reck59135872010-11-02 12:39:01 -07001713 { MaybeObject* maybe_obj =
1714 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1715 if (!maybe_obj->ToObject(&obj)) return false;
1716 }
Iain Merrick75681382010-08-19 15:07:18 +01001717 set_fixed_cow_array_map(Map::cast(obj));
1718 ASSERT(fixed_array_map() != fixed_cow_array_map());
1719
John Reck59135872010-11-02 12:39:01 -07001720 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1721 if (!maybe_obj->ToObject(&obj)) return false;
1722 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001723 set_heap_number_map(Map::cast(obj));
1724
John Reck59135872010-11-02 12:39:01 -07001725 { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1726 if (!maybe_obj->ToObject(&obj)) return false;
1727 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001728 set_proxy_map(Map::cast(obj));
1729
1730 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1731 const StringTypeTable& entry = string_type_table[i];
John Reck59135872010-11-02 12:39:01 -07001732 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1733 if (!maybe_obj->ToObject(&obj)) return false;
1734 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001735 roots_[entry.index] = Map::cast(obj);
1736 }
1737
John Reck59135872010-11-02 12:39:01 -07001738 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1739 if (!maybe_obj->ToObject(&obj)) return false;
1740 }
Steve Blockd0582a62009-12-15 09:54:21 +00001741 set_undetectable_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001742 Map::cast(obj)->set_is_undetectable();
1743
John Reck59135872010-11-02 12:39:01 -07001744 { MaybeObject* maybe_obj =
1745 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1746 if (!maybe_obj->ToObject(&obj)) return false;
1747 }
Steve Blockd0582a62009-12-15 09:54:21 +00001748 set_undetectable_ascii_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001749 Map::cast(obj)->set_is_undetectable();
1750
John Reck59135872010-11-02 12:39:01 -07001751 { MaybeObject* maybe_obj =
1752 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1753 if (!maybe_obj->ToObject(&obj)) return false;
1754 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001755 set_byte_array_map(Map::cast(obj));
1756
Ben Murdochb0fe1622011-05-05 13:52:32 +01001757 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1758 if (!maybe_obj->ToObject(&obj)) return false;
1759 }
1760 set_empty_byte_array(ByteArray::cast(obj));
1761
John Reck59135872010-11-02 12:39:01 -07001762 { MaybeObject* maybe_obj =
Steve Block44f0eee2011-05-26 01:26:41 +01001763 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
John Reck59135872010-11-02 12:39:01 -07001764 if (!maybe_obj->ToObject(&obj)) return false;
1765 }
Steve Block44f0eee2011-05-26 01:26:41 +01001766 set_external_pixel_array_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001767
John Reck59135872010-11-02 12:39:01 -07001768 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1769 ExternalArray::kAlignedSize);
1770 if (!maybe_obj->ToObject(&obj)) return false;
1771 }
Steve Block3ce2e202009-11-05 08:53:23 +00001772 set_external_byte_array_map(Map::cast(obj));
1773
John Reck59135872010-11-02 12:39:01 -07001774 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1775 ExternalArray::kAlignedSize);
1776 if (!maybe_obj->ToObject(&obj)) return false;
1777 }
Steve Block3ce2e202009-11-05 08:53:23 +00001778 set_external_unsigned_byte_array_map(Map::cast(obj));
1779
John Reck59135872010-11-02 12:39:01 -07001780 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1781 ExternalArray::kAlignedSize);
1782 if (!maybe_obj->ToObject(&obj)) return false;
1783 }
Steve Block3ce2e202009-11-05 08:53:23 +00001784 set_external_short_array_map(Map::cast(obj));
1785
John Reck59135872010-11-02 12:39:01 -07001786 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1787 ExternalArray::kAlignedSize);
1788 if (!maybe_obj->ToObject(&obj)) return false;
1789 }
Steve Block3ce2e202009-11-05 08:53:23 +00001790 set_external_unsigned_short_array_map(Map::cast(obj));
1791
John Reck59135872010-11-02 12:39:01 -07001792 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1793 ExternalArray::kAlignedSize);
1794 if (!maybe_obj->ToObject(&obj)) return false;
1795 }
Steve Block3ce2e202009-11-05 08:53:23 +00001796 set_external_int_array_map(Map::cast(obj));
1797
John Reck59135872010-11-02 12:39:01 -07001798 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1799 ExternalArray::kAlignedSize);
1800 if (!maybe_obj->ToObject(&obj)) return false;
1801 }
Steve Block3ce2e202009-11-05 08:53:23 +00001802 set_external_unsigned_int_array_map(Map::cast(obj));
1803
John Reck59135872010-11-02 12:39:01 -07001804 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1805 ExternalArray::kAlignedSize);
1806 if (!maybe_obj->ToObject(&obj)) return false;
1807 }
Steve Block3ce2e202009-11-05 08:53:23 +00001808 set_external_float_array_map(Map::cast(obj));
1809
John Reck59135872010-11-02 12:39:01 -07001810 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1811 if (!maybe_obj->ToObject(&obj)) return false;
1812 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001813 set_code_map(Map::cast(obj));
1814
John Reck59135872010-11-02 12:39:01 -07001815 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1816 JSGlobalPropertyCell::kSize);
1817 if (!maybe_obj->ToObject(&obj)) return false;
1818 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001819 set_global_property_cell_map(Map::cast(obj));
1820
John Reck59135872010-11-02 12:39:01 -07001821 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
1822 if (!maybe_obj->ToObject(&obj)) return false;
1823 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001824 set_one_pointer_filler_map(Map::cast(obj));
1825
John Reck59135872010-11-02 12:39:01 -07001826 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1827 if (!maybe_obj->ToObject(&obj)) return false;
1828 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001829 set_two_pointer_filler_map(Map::cast(obj));
1830
1831 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1832 const StructTable& entry = struct_table[i];
John Reck59135872010-11-02 12:39:01 -07001833 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1834 if (!maybe_obj->ToObject(&obj)) return false;
1835 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001836 roots_[entry.index] = Map::cast(obj);
1837 }
1838
John Reck59135872010-11-02 12:39:01 -07001839 { MaybeObject* maybe_obj =
1840 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1841 if (!maybe_obj->ToObject(&obj)) return false;
1842 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001843 set_hash_table_map(Map::cast(obj));
1844
John Reck59135872010-11-02 12:39:01 -07001845 { MaybeObject* maybe_obj =
1846 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1847 if (!maybe_obj->ToObject(&obj)) return false;
1848 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001849 set_context_map(Map::cast(obj));
1850
John Reck59135872010-11-02 12:39:01 -07001851 { MaybeObject* maybe_obj =
1852 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1853 if (!maybe_obj->ToObject(&obj)) return false;
1854 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001855 set_catch_context_map(Map::cast(obj));
1856
John Reck59135872010-11-02 12:39:01 -07001857 { MaybeObject* maybe_obj =
1858 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1859 if (!maybe_obj->ToObject(&obj)) return false;
1860 }
Ben Murdochf87a2032010-10-22 12:50:53 +01001861 Map* global_context_map = Map::cast(obj);
1862 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
1863 set_global_context_map(global_context_map);
Steve Blocka7e24c12009-10-30 11:49:00 +00001864
John Reck59135872010-11-02 12:39:01 -07001865 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1866 SharedFunctionInfo::kAlignedSize);
1867 if (!maybe_obj->ToObject(&obj)) return false;
1868 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001869 set_shared_function_info_map(Map::cast(obj));
1870
Steve Block1e0659c2011-05-24 12:43:12 +01001871 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
1872 JSMessageObject::kSize);
1873 if (!maybe_obj->ToObject(&obj)) return false;
1874 }
1875 set_message_object_map(Map::cast(obj));
1876
Steve Block44f0eee2011-05-26 01:26:41 +01001877 ASSERT(!InNewSpace(empty_fixed_array()));
Steve Blocka7e24c12009-10-30 11:49:00 +00001878 return true;
1879}
1880
1881
John Reck59135872010-11-02 12:39:01 -07001882MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001883 // Statically ensure that it is safe to allocate heap numbers in paged
1884 // spaces.
1885 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1886 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1887
John Reck59135872010-11-02 12:39:01 -07001888 Object* result;
1889 { MaybeObject* maybe_result =
1890 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1891 if (!maybe_result->ToObject(&result)) return maybe_result;
1892 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001893
1894 HeapObject::cast(result)->set_map(heap_number_map());
1895 HeapNumber::cast(result)->set_value(value);
1896 return result;
1897}
1898
1899
John Reck59135872010-11-02 12:39:01 -07001900MaybeObject* Heap::AllocateHeapNumber(double value) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001901 // Use general version, if we're forced to always allocate.
1902 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1903
1904 // This version of AllocateHeapNumber is optimized for
1905 // allocation in new space.
1906 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1907 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
John Reck59135872010-11-02 12:39:01 -07001908 Object* result;
1909 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
1910 if (!maybe_result->ToObject(&result)) return maybe_result;
1911 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001912 HeapObject::cast(result)->set_map(heap_number_map());
1913 HeapNumber::cast(result)->set_value(value);
1914 return result;
1915}
1916
1917
John Reck59135872010-11-02 12:39:01 -07001918MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1919 Object* result;
1920 { MaybeObject* maybe_result = AllocateRawCell();
1921 if (!maybe_result->ToObject(&result)) return maybe_result;
1922 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001923 HeapObject::cast(result)->set_map(global_property_cell_map());
1924 JSGlobalPropertyCell::cast(result)->set_value(value);
1925 return result;
1926}
1927
1928
John Reck59135872010-11-02 12:39:01 -07001929MaybeObject* Heap::CreateOddball(const char* to_string,
Steve Block44f0eee2011-05-26 01:26:41 +01001930 Object* to_number,
1931 byte kind) {
John Reck59135872010-11-02 12:39:01 -07001932 Object* result;
1933 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
1934 if (!maybe_result->ToObject(&result)) return maybe_result;
1935 }
Steve Block44f0eee2011-05-26 01:26:41 +01001936 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
Steve Blocka7e24c12009-10-30 11:49:00 +00001937}
1938
1939
1940bool Heap::CreateApiObjects() {
1941 Object* obj;
1942
John Reck59135872010-11-02 12:39:01 -07001943 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1944 if (!maybe_obj->ToObject(&obj)) return false;
1945 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001946 set_neander_map(Map::cast(obj));
1947
Steve Block44f0eee2011-05-26 01:26:41 +01001948 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
John Reck59135872010-11-02 12:39:01 -07001949 if (!maybe_obj->ToObject(&obj)) return false;
1950 }
1951 Object* elements;
1952 { MaybeObject* maybe_elements = AllocateFixedArray(2);
1953 if (!maybe_elements->ToObject(&elements)) return false;
1954 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001955 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1956 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1957 set_message_listeners(JSObject::cast(obj));
1958
1959 return true;
1960}
1961
1962
Steve Blocka7e24c12009-10-30 11:49:00 +00001963void Heap::CreateJSEntryStub() {
1964 JSEntryStub stub;
1965 set_js_entry_code(*stub.GetCode());
1966}
1967
1968
1969void Heap::CreateJSConstructEntryStub() {
1970 JSConstructEntryStub stub;
1971 set_js_construct_entry_code(*stub.GetCode());
1972}
1973
1974
1975void Heap::CreateFixedStubs() {
1976 // Here we create roots for fixed stubs. They are needed at GC
1977 // for cooking and uncooking (check out frames.cc).
 1978  // This eliminates the need for doing a dictionary lookup in the
1979 // stub cache for these stubs.
1980 HandleScope scope;
 1981  // gcc-4.4 has a problem generating correct code for the following snippet:
Steve Block44f0eee2011-05-26 01:26:41 +01001982 // { JSEntryStub stub;
1983 // js_entry_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001984 // }
Steve Block44f0eee2011-05-26 01:26:41 +01001985 // { JSConstructEntryStub stub;
1986 // js_construct_entry_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001987 // }
 1988  // To work around the problem, make separate functions without inlining.
Steve Blocka7e24c12009-10-30 11:49:00 +00001989 Heap::CreateJSEntryStub();
1990 Heap::CreateJSConstructEntryStub();
Steve Blocka7e24c12009-10-30 11:49:00 +00001991}
1992
1993
1994bool Heap::CreateInitialObjects() {
1995 Object* obj;
1996
1997 // The -0 value must be set before NumberFromDouble works.
John Reck59135872010-11-02 12:39:01 -07001998 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
1999 if (!maybe_obj->ToObject(&obj)) return false;
2000 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002001 set_minus_zero_value(obj);
2002 ASSERT(signbit(minus_zero_value()->Number()) != 0);
2003
John Reck59135872010-11-02 12:39:01 -07002004 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2005 if (!maybe_obj->ToObject(&obj)) return false;
2006 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002007 set_nan_value(obj);
2008
John Reck59135872010-11-02 12:39:01 -07002009 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
2010 if (!maybe_obj->ToObject(&obj)) return false;
2011 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002012 set_undefined_value(obj);
Steve Block44f0eee2011-05-26 01:26:41 +01002013 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
Steve Blocka7e24c12009-10-30 11:49:00 +00002014 ASSERT(!InNewSpace(undefined_value()));
2015
2016 // Allocate initial symbol table.
John Reck59135872010-11-02 12:39:01 -07002017 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2018 if (!maybe_obj->ToObject(&obj)) return false;
2019 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002020 // Don't use set_symbol_table() due to asserts.
2021 roots_[kSymbolTableRootIndex] = obj;
2022
2023 // Assign the print strings for oddballs after creating symboltable.
John Reck59135872010-11-02 12:39:01 -07002024 Object* symbol;
2025 { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
2026 if (!maybe_symbol->ToObject(&symbol)) return false;
2027 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002028 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
2029 Oddball::cast(undefined_value())->set_to_number(nan_value());
2030
Steve Blocka7e24c12009-10-30 11:49:00 +00002031 // Allocate the null_value
John Reck59135872010-11-02 12:39:01 -07002032 { MaybeObject* maybe_obj =
Steve Block44f0eee2011-05-26 01:26:41 +01002033 Oddball::cast(null_value())->Initialize("null",
2034 Smi::FromInt(0),
2035 Oddball::kNull);
John Reck59135872010-11-02 12:39:01 -07002036 if (!maybe_obj->ToObject(&obj)) return false;
2037 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002038
Steve Block44f0eee2011-05-26 01:26:41 +01002039 { MaybeObject* maybe_obj = CreateOddball("true",
2040 Smi::FromInt(1),
2041 Oddball::kTrue);
John Reck59135872010-11-02 12:39:01 -07002042 if (!maybe_obj->ToObject(&obj)) return false;
2043 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002044 set_true_value(obj);
2045
Steve Block44f0eee2011-05-26 01:26:41 +01002046 { MaybeObject* maybe_obj = CreateOddball("false",
2047 Smi::FromInt(0),
2048 Oddball::kFalse);
John Reck59135872010-11-02 12:39:01 -07002049 if (!maybe_obj->ToObject(&obj)) return false;
2050 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002051 set_false_value(obj);
2052
Steve Block44f0eee2011-05-26 01:26:41 +01002053 { MaybeObject* maybe_obj = CreateOddball("hole",
2054 Smi::FromInt(-1),
2055 Oddball::kTheHole);
John Reck59135872010-11-02 12:39:01 -07002056 if (!maybe_obj->ToObject(&obj)) return false;
2057 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002058 set_the_hole_value(obj);
2059
Ben Murdoch086aeea2011-05-13 15:57:08 +01002060 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
Steve Block44f0eee2011-05-26 01:26:41 +01002061 Smi::FromInt(-4),
2062 Oddball::kArgumentMarker);
Ben Murdoch086aeea2011-05-13 15:57:08 +01002063 if (!maybe_obj->ToObject(&obj)) return false;
2064 }
2065 set_arguments_marker(obj);
2066
Steve Block44f0eee2011-05-26 01:26:41 +01002067 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2068 Smi::FromInt(-2),
2069 Oddball::kOther);
John Reck59135872010-11-02 12:39:01 -07002070 if (!maybe_obj->ToObject(&obj)) return false;
2071 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002072 set_no_interceptor_result_sentinel(obj);
2073
Steve Block44f0eee2011-05-26 01:26:41 +01002074 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2075 Smi::FromInt(-3),
2076 Oddball::kOther);
John Reck59135872010-11-02 12:39:01 -07002077 if (!maybe_obj->ToObject(&obj)) return false;
2078 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002079 set_termination_exception(obj);
2080
2081 // Allocate the empty string.
John Reck59135872010-11-02 12:39:01 -07002082 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2083 if (!maybe_obj->ToObject(&obj)) return false;
2084 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002085 set_empty_string(String::cast(obj));
2086
2087 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
John Reck59135872010-11-02 12:39:01 -07002088 { MaybeObject* maybe_obj =
2089 LookupAsciiSymbol(constant_symbol_table[i].contents);
2090 if (!maybe_obj->ToObject(&obj)) return false;
2091 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002092 roots_[constant_symbol_table[i].index] = String::cast(obj);
2093 }
2094
2095 // Allocate the hidden symbol which is used to identify the hidden properties
2096 // in JSObjects. The hash code has a special value so that it will not match
2097 // the empty string when searching for the property. It cannot be part of the
2098 // loop above because it needs to be allocated manually with the special
2099 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2100 // that it will always be at the first entry in property descriptors.
John Reck59135872010-11-02 12:39:01 -07002101 { MaybeObject* maybe_obj =
2102 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2103 if (!maybe_obj->ToObject(&obj)) return false;
2104 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002105 hidden_symbol_ = String::cast(obj);
2106
2107 // Allocate the proxy for __proto__.
John Reck59135872010-11-02 12:39:01 -07002108 { MaybeObject* maybe_obj =
2109 AllocateProxy((Address) &Accessors::ObjectPrototype);
2110 if (!maybe_obj->ToObject(&obj)) return false;
2111 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002112 set_prototype_accessors(Proxy::cast(obj));
2113
2114 // Allocate the code_stubs dictionary. The initial size is set to avoid
2115 // expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002116 { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
2117 if (!maybe_obj->ToObject(&obj)) return false;
2118 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002119 set_code_stubs(NumberDictionary::cast(obj));
2120
2121 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2122 // is set to avoid expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002123 { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
2124 if (!maybe_obj->ToObject(&obj)) return false;
2125 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002126 set_non_monomorphic_cache(NumberDictionary::cast(obj));
2127
Kristian Monsen25f61362010-05-21 11:50:48 +01002128 set_instanceof_cache_function(Smi::FromInt(0));
2129 set_instanceof_cache_map(Smi::FromInt(0));
2130 set_instanceof_cache_answer(Smi::FromInt(0));
2131
Steve Blocka7e24c12009-10-30 11:49:00 +00002132 CreateFixedStubs();
2133
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002134 // Allocate the dictionary of intrinsic function names.
John Reck59135872010-11-02 12:39:01 -07002135 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2136 if (!maybe_obj->ToObject(&obj)) return false;
2137 }
Steve Block44f0eee2011-05-26 01:26:41 +01002138 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2139 obj);
John Reck59135872010-11-02 12:39:01 -07002140 if (!maybe_obj->ToObject(&obj)) return false;
2141 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002142 set_intrinsic_function_names(StringDictionary::cast(obj));
2143
Leon Clarkee46be812010-01-19 14:06:41 +00002144 if (InitializeNumberStringCache()->IsFailure()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002145
Steve Block6ded16b2010-05-10 14:33:55 +01002146 // Allocate cache for single character ASCII strings.
John Reck59135872010-11-02 12:39:01 -07002147 { MaybeObject* maybe_obj =
2148 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2149 if (!maybe_obj->ToObject(&obj)) return false;
2150 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002151 set_single_character_string_cache(FixedArray::cast(obj));
2152
2153 // Allocate cache for external strings pointing to native source code.
John Reck59135872010-11-02 12:39:01 -07002154 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2155 if (!maybe_obj->ToObject(&obj)) return false;
2156 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002157 set_natives_source_cache(FixedArray::cast(obj));
2158
Steve Block44f0eee2011-05-26 01:26:41 +01002159 // Handling of script id generation is in FACTORY->NewScript.
Steve Blocka7e24c12009-10-30 11:49:00 +00002160 set_last_script_id(undefined_value());
2161
2162 // Initialize keyed lookup cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002163 isolate_->keyed_lookup_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002164
2165 // Initialize context slot cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002166 isolate_->context_slot_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002167
2168 // Initialize descriptor cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002169 isolate_->descriptor_lookup_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002170
2171 // Initialize compilation cache.
Steve Block44f0eee2011-05-26 01:26:41 +01002172 isolate_->compilation_cache()->Clear();
Steve Blocka7e24c12009-10-30 11:49:00 +00002173
2174 return true;
2175}
2176
2177
John Reck59135872010-11-02 12:39:01 -07002178MaybeObject* Heap::InitializeNumberStringCache() {
Leon Clarkee46be812010-01-19 14:06:41 +00002179 // Compute the size of the number string cache based on the max heap size.
2180 // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
2181 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
2182 int number_string_cache_size = max_semispace_size_ / 512;
2183 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
John Reck59135872010-11-02 12:39:01 -07002184 Object* obj;
2185 MaybeObject* maybe_obj =
2186 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2187 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2188 return maybe_obj;
Leon Clarkee46be812010-01-19 14:06:41 +00002189}
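// Editor's note (illustrative, not part of the original source): the cache
// size scales with the semispace size and is then clamped, and the backing
// FixedArray holds two slots (key, value) per entry.  Worked example: with an
// 8 MB semispace, 8 MB / 512 = 16K entries, which lies inside the [32, 16K]
// clamp, so the array is allocated with 2 * 16K = 32K elements.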
2190
2191
2192void Heap::FlushNumberStringCache() {
2193 // Flush the number to string cache.
2194 int len = number_string_cache()->length();
2195 for (int i = 0; i < len; i++) {
Steve Block44f0eee2011-05-26 01:26:41 +01002196 number_string_cache()->set_undefined(this, i);
Leon Clarkee46be812010-01-19 14:06:41 +00002197 }
2198}
2199
2200
Steve Blocka7e24c12009-10-30 11:49:00 +00002201static inline int double_get_hash(double d) {
2202 DoubleRepresentation rep(d);
Leon Clarkee46be812010-01-19 14:06:41 +00002203 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
Steve Blocka7e24c12009-10-30 11:49:00 +00002204}
2205
2206
2207static inline int smi_get_hash(Smi* smi) {
Leon Clarkee46be812010-01-19 14:06:41 +00002208 return smi->value();
Steve Blocka7e24c12009-10-30 11:49:00 +00002209}
2210
2211
Steve Blocka7e24c12009-10-30 11:49:00 +00002212Object* Heap::GetNumberStringCache(Object* number) {
2213 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002214 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002215 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002216 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002217 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002218 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002219 }
2220 Object* key = number_string_cache()->get(hash * 2);
2221 if (key == number) {
2222 return String::cast(number_string_cache()->get(hash * 2 + 1));
2223 } else if (key->IsHeapNumber() &&
2224 number->IsHeapNumber() &&
2225 key->Number() == number->Number()) {
2226 return String::cast(number_string_cache()->get(hash * 2 + 1));
2227 }
2228 return undefined_value();
2229}
2230
2231
2232void Heap::SetNumberStringCache(Object* number, String* string) {
2233 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002234 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002235 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002236 hash = smi_get_hash(Smi::cast(number)) & mask;
Leon Clarke4515c472010-02-03 11:58:03 +00002237 number_string_cache()->set(hash * 2, Smi::cast(number));
Steve Blocka7e24c12009-10-30 11:49:00 +00002238 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002239 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002240 number_string_cache()->set(hash * 2, number);
2241 }
2242 number_string_cache()->set(hash * 2 + 1, string);
2243}
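// Editor's note (illustrative, not part of the original source): the number
// string cache is a direct-mapped table stored in a flat FixedArray, with the
// key at index hash * 2 and the cached string at hash * 2 + 1.  The hash is
// reduced with a mask rather than a modulo, which assumes the entry count is
// a power of two (the sizing above produces one for power-of-two semispace
// sizes).  In shorthand (NumberHash() stands for the smi/double hash above):
//
//   int mask = (cache->length() >> 1) - 1;     // entries = length / 2
//   int hash = NumberHash(number) & mask;
//   cache->set(hash * 2, number);              // key slot
//   cache->set(hash * 2 + 1, string);          // value slot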
2244
2245
John Reck59135872010-11-02 12:39:01 -07002246MaybeObject* Heap::NumberToString(Object* number,
2247 bool check_number_string_cache) {
Steve Block44f0eee2011-05-26 01:26:41 +01002248 isolate_->counters()->number_to_string_runtime()->Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002249 if (check_number_string_cache) {
2250 Object* cached = GetNumberStringCache(number);
2251 if (cached != undefined_value()) {
2252 return cached;
2253 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002254 }
2255
2256 char arr[100];
2257 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2258 const char* str;
2259 if (number->IsSmi()) {
2260 int num = Smi::cast(number)->value();
2261 str = IntToCString(num, buffer);
2262 } else {
2263 double num = HeapNumber::cast(number)->value();
2264 str = DoubleToCString(num, buffer);
2265 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002266
John Reck59135872010-11-02 12:39:01 -07002267 Object* js_string;
2268 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2269 if (maybe_js_string->ToObject(&js_string)) {
2270 SetNumberStringCache(number, String::cast(js_string));
Steve Blocka7e24c12009-10-30 11:49:00 +00002271 }
John Reck59135872010-11-02 12:39:01 -07002272 return maybe_js_string;
Steve Blocka7e24c12009-10-30 11:49:00 +00002273}
2274
2275
Steve Block3ce2e202009-11-05 08:53:23 +00002276Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2277 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2278}
2279
2280
2281Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2282 ExternalArrayType array_type) {
2283 switch (array_type) {
2284 case kExternalByteArray:
2285 return kExternalByteArrayMapRootIndex;
2286 case kExternalUnsignedByteArray:
2287 return kExternalUnsignedByteArrayMapRootIndex;
2288 case kExternalShortArray:
2289 return kExternalShortArrayMapRootIndex;
2290 case kExternalUnsignedShortArray:
2291 return kExternalUnsignedShortArrayMapRootIndex;
2292 case kExternalIntArray:
2293 return kExternalIntArrayMapRootIndex;
2294 case kExternalUnsignedIntArray:
2295 return kExternalUnsignedIntArrayMapRootIndex;
2296 case kExternalFloatArray:
2297 return kExternalFloatArrayMapRootIndex;
Steve Block44f0eee2011-05-26 01:26:41 +01002298 case kExternalPixelArray:
2299 return kExternalPixelArrayMapRootIndex;
Steve Block3ce2e202009-11-05 08:53:23 +00002300 default:
2301 UNREACHABLE();
2302 return kUndefinedValueRootIndex;
2303 }
2304}
2305
2306
John Reck59135872010-11-02 12:39:01 -07002307MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01002308 // We need to distinguish the minus zero value and this cannot be
2309 // done after conversion to int. Doing this by comparing bit
2310 // patterns is faster than using fpclassify() et al.
2311 static const DoubleRepresentation minus_zero(-0.0);
2312
2313 DoubleRepresentation rep(value);
2314 if (rep.bits == minus_zero.bits) {
2315 return AllocateHeapNumber(-0.0, pretenure);
2316 }
2317
2318 int int_value = FastD2I(value);
2319 if (value == int_value && Smi::IsValid(int_value)) {
2320 return Smi::FromInt(int_value);
2321 }
2322
2323 // Materialize the value in the heap.
2324 return AllocateHeapNumber(value, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002325}
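// Editor's note (illustrative, not part of the original source):
// NumberFromDouble special-cases -0.0 by comparing raw bit patterns, because
// -0.0 == 0 compares equal numerically and would otherwise be folded into the
// Smi 0.  The same check in portable C++, assuming the usual IEEE-754 layout:
//
//   #include <cstdint>
//   #include <cstring>
//   static bool IsMinusZero(double d) {
//     uint64_t bits;
//     std::memcpy(&bits, &d, sizeof(bits));
//     return bits == 0x8000000000000000ULL;  // sign bit set, all else zero
//   }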
2326
2327
John Reck59135872010-11-02 12:39:01 -07002328MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002329 // Statically ensure that it is safe to allocate proxies in paged spaces.
2330 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
2331 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002332 Object* result;
2333 { MaybeObject* maybe_result = Allocate(proxy_map(), space);
2334 if (!maybe_result->ToObject(&result)) return maybe_result;
2335 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002336
2337 Proxy::cast(result)->set_proxy(proxy);
2338 return result;
2339}
2340
2341
John Reck59135872010-11-02 12:39:01 -07002342MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2343 Object* result;
2344 { MaybeObject* maybe_result =
2345 Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2346 if (!maybe_result->ToObject(&result)) return maybe_result;
2347 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002348
2349 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
2350 share->set_name(name);
Steve Block44f0eee2011-05-26 01:26:41 +01002351 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
Steve Blocka7e24c12009-10-30 11:49:00 +00002352 share->set_code(illegal);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002353 share->set_scope_info(SerializedScopeInfo::Empty());
Steve Block44f0eee2011-05-26 01:26:41 +01002354 Code* construct_stub = isolate_->builtins()->builtin(
2355 Builtins::kJSConstructStubGeneric);
Steve Blocka7e24c12009-10-30 11:49:00 +00002356 share->set_construct_stub(construct_stub);
2357 share->set_expected_nof_properties(0);
2358 share->set_length(0);
2359 share->set_formal_parameter_count(0);
2360 share->set_instance_class_name(Object_symbol());
2361 share->set_function_data(undefined_value());
2362 share->set_script(undefined_value());
2363 share->set_start_position_and_type(0);
2364 share->set_debug_info(undefined_value());
2365 share->set_inferred_name(empty_string());
2366 share->set_compiler_hints(0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002367 share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002368 share->set_initial_map(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002369 share->set_this_property_assignments_count(0);
2370 share->set_this_property_assignments(undefined_value());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002371 share->set_opt_count(0);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002372 share->set_num_literals(0);
2373 share->set_end_position(0);
2374 share->set_function_token_position(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002375 return result;
2376}
2377
2378
Steve Block1e0659c2011-05-24 12:43:12 +01002379MaybeObject* Heap::AllocateJSMessageObject(String* type,
2380 JSArray* arguments,
2381 int start_position,
2382 int end_position,
2383 Object* script,
2384 Object* stack_trace,
2385 Object* stack_frames) {
2386 Object* result;
2387 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
2388 if (!maybe_result->ToObject(&result)) return maybe_result;
2389 }
2390 JSMessageObject* message = JSMessageObject::cast(result);
2391 message->set_properties(Heap::empty_fixed_array());
2392 message->set_elements(Heap::empty_fixed_array());
2393 message->set_type(type);
2394 message->set_arguments(arguments);
2395 message->set_start_position(start_position);
2396 message->set_end_position(end_position);
2397 message->set_script(script);
2398 message->set_stack_trace(stack_trace);
2399 message->set_stack_frames(stack_frames);
2400 return result;
2401}
2402
2403
2404
Steve Blockd0582a62009-12-15 09:54:21 +00002405// Returns true for a character in a range. Both limits are inclusive.
2406static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2407  // This makes use of the unsigned wraparound.
2408 return character - from <= to - from;
2409}
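// Worked example of the wraparound trick above (all arithmetic is unsigned
// and from <= to is assumed): for Between(c, '0', '9') the right-hand side is
// '9' - '0' = 9, so
//   c == '5'  ->  '5' - '0' = 5, and 5 <= 9, so it is in range;
//   c == 'a'  ->  'a' - '0' = 49, and 49 > 9, so it is out of range;
//   c == '/'  ->  '/' - '0' wraps to a huge unsigned value > 9, out of range.
// The single comparison therefore matches (from <= c && c <= to).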
2410
2411
John Reck59135872010-11-02 12:39:01 -07002412MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
Steve Block44f0eee2011-05-26 01:26:41 +01002413 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07002414 uint32_t c1,
2415 uint32_t c2) {
Steve Blockd0582a62009-12-15 09:54:21 +00002416 String* symbol;
2417 // Numeric strings have a different hash algorithm not known by
2418 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2419 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
Steve Block44f0eee2011-05-26 01:26:41 +01002420 heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
Steve Blockd0582a62009-12-15 09:54:21 +00002421 return symbol;
2422  // Now that we know the length is 2, we might as well make use of that fact
2423 // when building the new string.
2424 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2425 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
John Reck59135872010-11-02 12:39:01 -07002426 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002427 { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
John Reck59135872010-11-02 12:39:01 -07002428 if (!maybe_result->ToObject(&result)) return maybe_result;
2429 }
Steve Blockd0582a62009-12-15 09:54:21 +00002430 char* dest = SeqAsciiString::cast(result)->GetChars();
2431 dest[0] = c1;
2432 dest[1] = c2;
2433 return result;
2434 } else {
John Reck59135872010-11-02 12:39:01 -07002435 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002436 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
John Reck59135872010-11-02 12:39:01 -07002437 if (!maybe_result->ToObject(&result)) return maybe_result;
2438 }
Steve Blockd0582a62009-12-15 09:54:21 +00002439 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2440 dest[0] = c1;
2441 dest[1] = c2;
2442 return result;
2443 }
2444}
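// The (c1 | c2) <= String::kMaxAsciiCharCodeU test above folds two range
// checks into one comparison. It is only valid because the limit plus one is
// a power of two, as the ASSERT documents: any value above the limit has a
// bit set outside the low bits, and OR can set bits but never clear them.
// A minimal sketch with a hypothetical helper and a hard-coded 0x7F limit:
static inline bool ExampleBothAtMostSevenBits(uint32_t c1, uint32_t c2) {
  const uint32_t kLimit = 0x7F;  // stands in for String::kMaxAsciiCharCodeU
  // Equivalent to (c1 <= kLimit && c2 <= kLimit) because kLimit + 1 == 0x80.
  return (c1 | c2) <= kLimit;
}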
2445
2446
John Reck59135872010-11-02 12:39:01 -07002447MaybeObject* Heap::AllocateConsString(String* first, String* second) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002448 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002449 if (first_length == 0) {
2450 return second;
2451 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002452
2453 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002454 if (second_length == 0) {
2455 return first;
2456 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002457
2458 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002459
2460 // Optimization for 2-byte strings often used as keys in a decompression
2461 // dictionary. Check whether we already have the string in the symbol
2462  // table to prevent creation of many unnecessary strings.
2463 if (length == 2) {
2464 unsigned c1 = first->Get(0);
2465 unsigned c2 = second->Get(0);
Steve Block44f0eee2011-05-26 01:26:41 +01002466 return MakeOrFindTwoCharacterString(this, c1, c2);
Steve Blockd0582a62009-12-15 09:54:21 +00002467 }
2468
Steve Block6ded16b2010-05-10 14:33:55 +01002469 bool first_is_ascii = first->IsAsciiRepresentation();
2470 bool second_is_ascii = second->IsAsciiRepresentation();
2471 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002472
2473 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002474 // of the new cons string is too large.
2475 if (length > String::kMaxLength || length < 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01002476 isolate()->context()->mark_out_of_memory();
Steve Blocka7e24c12009-10-30 11:49:00 +00002477 return Failure::OutOfMemoryException();
2478 }
2479
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002480 bool is_ascii_data_in_two_byte_string = false;
2481 if (!is_ascii) {
2482 // At least one of the strings uses two-byte representation so we
2483 // can't use the fast case code for short ascii strings below, but
2484 // we can try to save memory if all chars actually fit in ascii.
2485 is_ascii_data_in_two_byte_string =
2486 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2487 if (is_ascii_data_in_two_byte_string) {
Steve Block44f0eee2011-05-26 01:26:41 +01002488 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002489 }
2490 }
2491
Steve Blocka7e24c12009-10-30 11:49:00 +00002492 // If the resulting string is small make a flat string.
2493 if (length < String::kMinNonFlatLength) {
2494 ASSERT(first->IsFlat());
2495 ASSERT(second->IsFlat());
2496 if (is_ascii) {
John Reck59135872010-11-02 12:39:01 -07002497 Object* result;
2498 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2499 if (!maybe_result->ToObject(&result)) return maybe_result;
2500 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002501 // Copy the characters into the new object.
2502 char* dest = SeqAsciiString::cast(result)->GetChars();
2503 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002504 const char* src;
2505 if (first->IsExternalString()) {
2506 src = ExternalAsciiString::cast(first)->resource()->data();
2507 } else {
2508 src = SeqAsciiString::cast(first)->GetChars();
2509 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002510 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2511 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002512 if (second->IsExternalString()) {
2513 src = ExternalAsciiString::cast(second)->resource()->data();
2514 } else {
2515 src = SeqAsciiString::cast(second)->GetChars();
2516 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002517 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2518 return result;
2519 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002520 if (is_ascii_data_in_two_byte_string) {
John Reck59135872010-11-02 12:39:01 -07002521 Object* result;
2522 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2523 if (!maybe_result->ToObject(&result)) return maybe_result;
2524 }
Steve Block6ded16b2010-05-10 14:33:55 +01002525 // Copy the characters into the new object.
2526 char* dest = SeqAsciiString::cast(result)->GetChars();
2527 String::WriteToFlat(first, dest, 0, first_length);
2528 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block44f0eee2011-05-26 01:26:41 +01002529 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002530 return result;
2531 }
2532
John Reck59135872010-11-02 12:39:01 -07002533 Object* result;
2534 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2535 if (!maybe_result->ToObject(&result)) return maybe_result;
2536 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002537 // Copy the characters into the new object.
2538 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2539 String::WriteToFlat(first, dest, 0, first_length);
2540 String::WriteToFlat(second, dest + first_length, 0, second_length);
2541 return result;
2542 }
2543 }
2544
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002545 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2546 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002547
John Reck59135872010-11-02 12:39:01 -07002548 Object* result;
2549 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2550 if (!maybe_result->ToObject(&result)) return maybe_result;
2551 }
Leon Clarke4515c472010-02-03 11:58:03 +00002552
2553 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002554 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002555 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002556 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002557 cons_string->set_hash_field(String::kEmptyHashField);
2558 cons_string->set_first(first, mode);
2559 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002560 return result;
2561}
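// Summary of the result shapes produced above:
//   length < String::kMinNonFlatLength  ->  a fresh flat sequential string
//       (ASCII when both inputs carry only ASCII data, two-byte otherwise),
//       with all characters copied immediately;
//   otherwise                           ->  a ConsString cell that stores just
//       {length, hash, first, second} and defers the copying until the string
//       is flattened later.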
2562
2563
John Reck59135872010-11-02 12:39:01 -07002564MaybeObject* Heap::AllocateSubString(String* buffer,
Steve Blocka7e24c12009-10-30 11:49:00 +00002565 int start,
Steve Block6ded16b2010-05-10 14:33:55 +01002566 int end,
2567 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002568 int length = end - start;
2569
2570 if (length == 1) {
Steve Block44f0eee2011-05-26 01:26:41 +01002571 return LookupSingleCharacterStringFromCode(buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002572 } else if (length == 2) {
2573 // Optimization for 2-byte strings often used as keys in a decompression
2574 // dictionary. Check whether we already have the string in the symbol
2575    // table to prevent creation of many unnecessary strings.
2576 unsigned c1 = buffer->Get(start);
2577 unsigned c2 = buffer->Get(start + 1);
Steve Block44f0eee2011-05-26 01:26:41 +01002578 return MakeOrFindTwoCharacterString(this, c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002579 }
2580
2581 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002582 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002583
John Reck59135872010-11-02 12:39:01 -07002584 Object* result;
2585 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
2586        ? AllocateRawAsciiString(length, pretenure)
2587        : AllocateRawTwoByteString(length, pretenure);
2588 if (!maybe_result->ToObject(&result)) return maybe_result;
2589 }
Steve Blockd0582a62009-12-15 09:54:21 +00002590 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002591 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002592 if (buffer->IsAsciiRepresentation()) {
2593 ASSERT(string_result->IsAsciiRepresentation());
2594 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2595 String::WriteToFlat(buffer, dest, start, end);
2596 } else {
2597 ASSERT(string_result->IsTwoByteRepresentation());
2598 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2599 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002600 }
Steve Blockd0582a62009-12-15 09:54:21 +00002601
Steve Blocka7e24c12009-10-30 11:49:00 +00002602 return result;
2603}
2604
2605
John Reck59135872010-11-02 12:39:01 -07002606MaybeObject* Heap::AllocateExternalStringFromAscii(
Steve Blocka7e24c12009-10-30 11:49:00 +00002607 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002608 size_t length = resource->length();
2609 if (length > static_cast<size_t>(String::kMaxLength)) {
Steve Block44f0eee2011-05-26 01:26:41 +01002610 isolate()->context()->mark_out_of_memory();
Steve Blockd0582a62009-12-15 09:54:21 +00002611 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002612 }
2613
Steve Blockd0582a62009-12-15 09:54:21 +00002614 Map* map = external_ascii_string_map();
John Reck59135872010-11-02 12:39:01 -07002615 Object* result;
2616 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2617 if (!maybe_result->ToObject(&result)) return maybe_result;
2618 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002619
2620 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002621 external_string->set_length(static_cast<int>(length));
2622 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002623 external_string->set_resource(resource);
2624
2625 return result;
2626}
2627
2628
John Reck59135872010-11-02 12:39:01 -07002629MaybeObject* Heap::AllocateExternalStringFromTwoByte(
Steve Blocka7e24c12009-10-30 11:49:00 +00002630 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002631 size_t length = resource->length();
2632 if (length > static_cast<size_t>(String::kMaxLength)) {
Steve Block44f0eee2011-05-26 01:26:41 +01002633 isolate()->context()->mark_out_of_memory();
Steve Blockd0582a62009-12-15 09:54:21 +00002634 return Failure::OutOfMemoryException();
2635 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002636
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002637 // For small strings we check whether the resource contains only
Steve Block9fac8402011-05-12 15:51:54 +01002638 // ASCII characters. If yes, we use a different string map.
2639 static const size_t kAsciiCheckLengthLimit = 32;
2640 bool is_ascii = length <= kAsciiCheckLengthLimit &&
2641 String::IsAscii(resource->data(), static_cast<int>(length));
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002642 Map* map = is_ascii ?
Steve Block44f0eee2011-05-26 01:26:41 +01002643 external_string_with_ascii_data_map() : external_string_map();
John Reck59135872010-11-02 12:39:01 -07002644 Object* result;
2645 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2646 if (!maybe_result->ToObject(&result)) return maybe_result;
2647 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002648
2649 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002650 external_string->set_length(static_cast<int>(length));
2651 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002652 external_string->set_resource(resource);
2653
2654 return result;
2655}
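// The ASCII-data check above is limited to short resources
// (kAsciiCheckLengthLimit), presumably to bound the cost of scanning the
// characters at allocation time. A sketch of what such a scan amounts to,
// with a hypothetical helper name over uc16 data:
static inline bool ExampleContainsOnlyAsciiData(const uc16* data, int length) {
  for (int i = 0; i < length; i++) {
    if (data[i] > 0x7F) return false;  // any character above 7 bits disqualifies
  }
  return true;
}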
2656
2657
John Reck59135872010-11-02 12:39:01 -07002658MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002659 if (code <= String::kMaxAsciiCharCode) {
Steve Block44f0eee2011-05-26 01:26:41 +01002660 Object* value = single_character_string_cache()->get(code);
2661 if (value != undefined_value()) return value;
Steve Blocka7e24c12009-10-30 11:49:00 +00002662
2663 char buffer[1];
2664 buffer[0] = static_cast<char>(code);
John Reck59135872010-11-02 12:39:01 -07002665 Object* result;
2666 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00002667
John Reck59135872010-11-02 12:39:01 -07002668 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block44f0eee2011-05-26 01:26:41 +01002669 single_character_string_cache()->set(code, result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002670 return result;
2671 }
2672
John Reck59135872010-11-02 12:39:01 -07002673 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002674 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
John Reck59135872010-11-02 12:39:01 -07002675 if (!maybe_result->ToObject(&result)) return maybe_result;
2676 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002677 String* answer = String::cast(result);
2678 answer->Set(0, code);
2679 return answer;
2680}
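// The fast path above is plain array memoization keyed by character code:
// codes up to String::kMaxAsciiCharCode hit a cache of one-character symbols
// and only pay for the symbol table lookup once. A generic standalone
// analogue of that caching shape (hypothetical names, with a toy function
// standing in for the expensive lookup):
static inline int ExampleExpensiveLookup(int code) { return code * code + 1; }
static int example_single_char_cache[128];  // zero-initialized; 0 means empty
static int ExampleCachedLookup(int code) {
  if (code < 0 || code >= 128) return ExampleExpensiveLookup(code);  // uncached range
  if (example_single_char_cache[code] == 0) {
    example_single_char_cache[code] = ExampleExpensiveLookup(code);  // fill on first miss
  }
  return example_single_char_cache[code];
}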
2681
2682
John Reck59135872010-11-02 12:39:01 -07002683MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002684 if (length < 0 || length > ByteArray::kMaxLength) {
2685 return Failure::OutOfMemoryException();
2686 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002687 if (pretenure == NOT_TENURED) {
2688 return AllocateByteArray(length);
2689 }
2690 int size = ByteArray::SizeFor(length);
John Reck59135872010-11-02 12:39:01 -07002691 Object* result;
2692 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2693 ? old_data_space_->AllocateRaw(size)
2694 : lo_space_->AllocateRaw(size);
2695 if (!maybe_result->ToObject(&result)) return maybe_result;
2696 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002697
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002698 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2699 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002700 return result;
2701}
2702
2703
John Reck59135872010-11-02 12:39:01 -07002704MaybeObject* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002705 if (length < 0 || length > ByteArray::kMaxLength) {
2706 return Failure::OutOfMemoryException();
2707 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002708 int size = ByteArray::SizeFor(length);
2709 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002710 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002711 Object* result;
2712 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2713 if (!maybe_result->ToObject(&result)) return maybe_result;
2714 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002715
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002716 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2717 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002718 return result;
2719}
2720
2721
2722void Heap::CreateFillerObjectAt(Address addr, int size) {
2723 if (size == 0) return;
2724 HeapObject* filler = HeapObject::FromAddress(addr);
2725 if (size == kPointerSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01002726 filler->set_map(one_pointer_filler_map());
2727 } else if (size == 2 * kPointerSize) {
2728 filler->set_map(two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002729 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002730 filler->set_map(byte_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002731 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2732 }
2733}
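// Filler selection above depends only on the size of the hole being plugged:
//   size == kPointerSize      ->  one_pointer_filler_map (a lone map word);
//   size == 2 * kPointerSize  ->  two_pointer_filler_map (map word plus one);
//   anything larger           ->  a fake ByteArray whose length is chosen via
//                                 ByteArray::LengthFor(size) so the filler
//                                 covers exactly 'size' bytes.
// Heap iterators can then walk over freed gaps as if they were real objects.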
2734
2735
John Reck59135872010-11-02 12:39:01 -07002736MaybeObject* Heap::AllocateExternalArray(int length,
2737 ExternalArrayType array_type,
2738 void* external_pointer,
2739 PretenureFlag pretenure) {
Steve Block3ce2e202009-11-05 08:53:23 +00002740 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002741 Object* result;
2742 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
2743 space,
2744 OLD_DATA_SPACE);
2745 if (!maybe_result->ToObject(&result)) return maybe_result;
2746 }
Steve Block3ce2e202009-11-05 08:53:23 +00002747
2748 reinterpret_cast<ExternalArray*>(result)->set_map(
2749 MapForExternalArrayType(array_type));
2750 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2751 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2752 external_pointer);
2753
2754 return result;
2755}
2756
2757
John Reck59135872010-11-02 12:39:01 -07002758MaybeObject* Heap::CreateCode(const CodeDesc& desc,
2759 Code::Flags flags,
Steve Block44f0eee2011-05-26 01:26:41 +01002760 Handle<Object> self_reference,
2761 bool immovable) {
Leon Clarkeac952652010-07-15 11:15:24 +01002762 // Allocate ByteArray before the Code object, so that we do not risk
2763 // leaving uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002764 Object* reloc_info;
2765 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2766 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
2767 }
Leon Clarkeac952652010-07-15 11:15:24 +01002768
Steve Block44f0eee2011-05-26 01:26:41 +01002769 // Compute size.
Leon Clarkeac952652010-07-15 11:15:24 +01002770 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002771 int obj_size = Code::SizeFor(body_size);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002772 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
John Reck59135872010-11-02 12:39:01 -07002773 MaybeObject* maybe_result;
Steve Block44f0eee2011-05-26 01:26:41 +01002774 // Large code objects and code objects which should stay at a fixed address
2775 // are allocated in large object space.
2776 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
John Reck59135872010-11-02 12:39:01 -07002777 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002778 } else {
John Reck59135872010-11-02 12:39:01 -07002779 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002780 }
2781
John Reck59135872010-11-02 12:39:01 -07002782 Object* result;
2783 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002784
2785 // Initialize the object
2786 HeapObject::cast(result)->set_map(code_map());
2787 Code* code = Code::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002788 ASSERT(!isolate_->code_range()->exists() ||
2789 isolate_->code_range()->contains(code->address()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002790 code->set_instruction_size(desc.instr_size);
Leon Clarkeac952652010-07-15 11:15:24 +01002791 code->set_relocation_info(ByteArray::cast(reloc_info));
Steve Blocka7e24c12009-10-30 11:49:00 +00002792 code->set_flags(flags);
Ben Murdochb8e0da22011-05-16 14:20:40 +01002793 if (code->is_call_stub() || code->is_keyed_call_stub()) {
2794 code->set_check_type(RECEIVER_MAP_CHECK);
2795 }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002796 code->set_deoptimization_data(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00002797 // Allow self references to created code object by patching the handle to
2798 // point to the newly allocated Code object.
2799 if (!self_reference.is_null()) {
2800 *(self_reference.location()) = code;
2801 }
2802 // Migrate generated code.
2803 // The generated code can contain Object** values (typically from handles)
2804 // that are dereferenced during the copy to point directly to the actual heap
2805 // objects. These pointers can include references to the code object itself,
2806 // through the self_reference parameter.
2807 code->CopyFrom(desc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002808
2809#ifdef DEBUG
2810 code->Verify();
2811#endif
2812 return code;
2813}
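// The ordering above (relocation ByteArray first, Code object second) keeps a
// failed allocation from ever exposing a partially initialized Code object to
// the garbage collector. A hedged outline of that pattern with hypothetical
// Try* helpers, shown as comments only:
//
//   Parts* parts = TryAllocateParts();   // may fail; nothing is published yet
//   if (parts == NULL) return Failure();
//   Whole* whole = TryAllocateWhole();   // may fail; 'parts' simply becomes garbage
//   if (whole == NULL) return Failure();
//   whole->set_parts(parts);             // only now does 'whole' exist fully formed
//   return whole;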
2814
2815
John Reck59135872010-11-02 12:39:01 -07002816MaybeObject* Heap::CopyCode(Code* code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002817 // Allocate an object the same size as the code object.
2818 int obj_size = code->Size();
John Reck59135872010-11-02 12:39:01 -07002819 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002820 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002821 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002822 } else {
John Reck59135872010-11-02 12:39:01 -07002823 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002824 }
2825
John Reck59135872010-11-02 12:39:01 -07002826 Object* result;
2827 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002828
2829 // Copy code object.
2830 Address old_addr = code->address();
2831 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002832 CopyBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002833 // Relocate the copy.
2834 Code* new_code = Code::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002835 ASSERT(!isolate_->code_range()->exists() ||
2836 isolate_->code_range()->contains(code->address()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002837 new_code->Relocate(new_addr - old_addr);
2838 return new_code;
2839}
2840
2841
John Reck59135872010-11-02 12:39:01 -07002842MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Leon Clarkeac952652010-07-15 11:15:24 +01002843 // Allocate ByteArray before the Code object, so that we do not risk
2844 // leaving uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002845 Object* reloc_info_array;
2846 { MaybeObject* maybe_reloc_info_array =
2847 AllocateByteArray(reloc_info.length(), TENURED);
2848 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
2849 return maybe_reloc_info_array;
2850 }
2851 }
Leon Clarkeac952652010-07-15 11:15:24 +01002852
2853 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
Steve Block6ded16b2010-05-10 14:33:55 +01002854
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002855 int new_obj_size = Code::SizeFor(new_body_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002856
2857 Address old_addr = code->address();
2858
2859 size_t relocation_offset =
Leon Clarkeac952652010-07-15 11:15:24 +01002860 static_cast<size_t>(code->instruction_end() - old_addr);
Steve Block6ded16b2010-05-10 14:33:55 +01002861
John Reck59135872010-11-02 12:39:01 -07002862 MaybeObject* maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002863 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002864 maybe_result = lo_space_->AllocateRawCode(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002865 } else {
John Reck59135872010-11-02 12:39:01 -07002866 maybe_result = code_space_->AllocateRaw(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002867 }
2868
John Reck59135872010-11-02 12:39:01 -07002869 Object* result;
2870 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002871
2872 // Copy code object.
2873 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2874
2875 // Copy header and instructions.
2876 memcpy(new_addr, old_addr, relocation_offset);
2877
Steve Block6ded16b2010-05-10 14:33:55 +01002878 Code* new_code = Code::cast(result);
Leon Clarkeac952652010-07-15 11:15:24 +01002879 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
Steve Block6ded16b2010-05-10 14:33:55 +01002880
Leon Clarkeac952652010-07-15 11:15:24 +01002881 // Copy patched rinfo.
2882 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
Steve Block6ded16b2010-05-10 14:33:55 +01002883
2884 // Relocate the copy.
Steve Block44f0eee2011-05-26 01:26:41 +01002885 ASSERT(!isolate_->code_range()->exists() ||
2886 isolate_->code_range()->contains(code->address()));
Steve Block6ded16b2010-05-10 14:33:55 +01002887 new_code->Relocate(new_addr - old_addr);
2888
2889#ifdef DEBUG
2890  new_code->Verify();
2891#endif
2892 return new_code;
2893}
2894
2895
John Reck59135872010-11-02 12:39:01 -07002896MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002897 ASSERT(gc_state_ == NOT_IN_GC);
2898 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00002899 // If allocation failures are disallowed, we may allocate in a different
2900 // space when new space is full and the object is not a large object.
2901 AllocationSpace retry_space =
2902 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
John Reck59135872010-11-02 12:39:01 -07002903 Object* result;
2904 { MaybeObject* maybe_result =
2905 AllocateRaw(map->instance_size(), space, retry_space);
2906 if (!maybe_result->ToObject(&result)) return maybe_result;
2907 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002908 HeapObject::cast(result)->set_map(map);
Steve Block3ce2e202009-11-05 08:53:23 +00002909#ifdef ENABLE_LOGGING_AND_PROFILING
Steve Block44f0eee2011-05-26 01:26:41 +01002910 isolate_->producer_heap_profile()->RecordJSObjectAllocation(result);
Steve Block3ce2e202009-11-05 08:53:23 +00002911#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002912 return result;
2913}
2914
2915
John Reck59135872010-11-02 12:39:01 -07002916MaybeObject* Heap::InitializeFunction(JSFunction* function,
2917 SharedFunctionInfo* shared,
2918 Object* prototype) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002919 ASSERT(!prototype->IsMap());
2920 function->initialize_properties();
2921 function->initialize_elements();
2922 function->set_shared(shared);
Iain Merrick75681382010-08-19 15:07:18 +01002923 function->set_code(shared->code());
Steve Blocka7e24c12009-10-30 11:49:00 +00002924 function->set_prototype_or_initial_map(prototype);
2925 function->set_context(undefined_value());
Leon Clarke4515c472010-02-03 11:58:03 +00002926 function->set_literals(empty_fixed_array());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002927 function->set_next_function_link(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002928 return function;
2929}
2930
2931
John Reck59135872010-11-02 12:39:01 -07002932MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002933 // Allocate the prototype. Make sure to use the object function
2934 // from the function's context, since the function can be from a
2935 // different context.
2936 JSFunction* object_function =
2937 function->context()->global_context()->object_function();
John Reck59135872010-11-02 12:39:01 -07002938 Object* prototype;
2939 { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
2940 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2941 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002942 // When creating the prototype for the function we must set its
2943 // constructor to the function.
John Reck59135872010-11-02 12:39:01 -07002944 Object* result;
2945 { MaybeObject* maybe_result =
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002946 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
2947 constructor_symbol(), function, DONT_ENUM);
John Reck59135872010-11-02 12:39:01 -07002948 if (!maybe_result->ToObject(&result)) return maybe_result;
2949 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002950 return prototype;
2951}
2952
2953
John Reck59135872010-11-02 12:39:01 -07002954MaybeObject* Heap::AllocateFunction(Map* function_map,
2955 SharedFunctionInfo* shared,
2956 Object* prototype,
2957 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002958 AllocationSpace space =
2959 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002960 Object* result;
2961 { MaybeObject* maybe_result = Allocate(function_map, space);
2962 if (!maybe_result->ToObject(&result)) return maybe_result;
2963 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002964 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2965}
2966
2967
John Reck59135872010-11-02 12:39:01 -07002968MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002969 // To get fast allocation and map sharing for arguments objects we
2970 // allocate them based on an arguments boilerplate.
2971
Steve Block44f0eee2011-05-26 01:26:41 +01002972 JSObject* boilerplate;
2973 int arguments_object_size;
2974 bool strict_mode_callee = callee->IsJSFunction() &&
2975 JSFunction::cast(callee)->shared()->strict_mode();
2976 if (strict_mode_callee) {
2977 boilerplate =
2978 isolate()->context()->global_context()->
2979 strict_mode_arguments_boilerplate();
2980 arguments_object_size = kArgumentsObjectSizeStrict;
2981 } else {
2982 boilerplate =
2983 isolate()->context()->global_context()->arguments_boilerplate();
2984 arguments_object_size = kArgumentsObjectSize;
2985 }
2986
Steve Blocka7e24c12009-10-30 11:49:00 +00002987 // This calls Copy directly rather than using Heap::AllocateRaw so we
2988 // duplicate the check here.
2989 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2990
Leon Clarkee46be812010-01-19 14:06:41 +00002991 // Check that the size of the boilerplate matches our
2992 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2993 // on the size being a known constant.
Steve Block44f0eee2011-05-26 01:26:41 +01002994 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
Leon Clarkee46be812010-01-19 14:06:41 +00002995
2996 // Do the allocation.
John Reck59135872010-11-02 12:39:01 -07002997 Object* result;
2998 { MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01002999 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
John Reck59135872010-11-02 12:39:01 -07003000 if (!maybe_result->ToObject(&result)) return maybe_result;
3001 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003002
3003 // Copy the content. The arguments boilerplate doesn't have any
3004 // fields that point to new space so it's safe to skip the write
3005 // barrier here.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003006 CopyBlock(HeapObject::cast(result)->address(),
3007 boilerplate->address(),
Steve Block44f0eee2011-05-26 01:26:41 +01003008 JSObject::kHeaderSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003009
Steve Block44f0eee2011-05-26 01:26:41 +01003010 // Set the length property.
3011 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
Steve Blocka7e24c12009-10-30 11:49:00 +00003012 Smi::FromInt(length),
3013 SKIP_WRITE_BARRIER);
Steve Block44f0eee2011-05-26 01:26:41 +01003014 // Set the callee property for non-strict mode arguments object only.
3015 if (!strict_mode_callee) {
3016 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3017 callee);
3018 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003019
3020 // Check the state of the object
3021 ASSERT(JSObject::cast(result)->HasFastProperties());
3022 ASSERT(JSObject::cast(result)->HasFastElements());
3023
3024 return result;
3025}
3026
3027
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003028static bool HasDuplicates(DescriptorArray* descriptors) {
3029 int count = descriptors->number_of_descriptors();
3030 if (count > 1) {
3031 String* prev_key = descriptors->GetKey(0);
3032 for (int i = 1; i != count; i++) {
3033 String* current_key = descriptors->GetKey(i);
3034 if (prev_key == current_key) return true;
3035 prev_key = current_key;
3036 }
3037 }
3038 return false;
3039}
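// HasDuplicates runs in linear time but is only meaningful on sorted input,
// where equal keys end up adjacent; AllocateInitialMap below sorts the
// descriptors before calling it. A standalone analogue over a plain sorted
// int array (hypothetical name):
static inline bool ExampleHasAdjacentDuplicates(const int* sorted, int count) {
  for (int i = 1; i < count; i++) {
    if (sorted[i - 1] == sorted[i]) return true;  // equal values touch once sorted
  }
  return false;
}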
3040
3041
John Reck59135872010-11-02 12:39:01 -07003042MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003043 ASSERT(!fun->has_initial_map());
3044
3045 // First create a new map with the size and number of in-object properties
3046 // suggested by the function.
3047 int instance_size = fun->shared()->CalculateInstanceSize();
3048 int in_object_properties = fun->shared()->CalculateInObjectProperties();
John Reck59135872010-11-02 12:39:01 -07003049 Object* map_obj;
Steve Block44f0eee2011-05-26 01:26:41 +01003050 { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
John Reck59135872010-11-02 12:39:01 -07003051 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
3052 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003053
3054 // Fetch or allocate prototype.
3055 Object* prototype;
3056 if (fun->has_instance_prototype()) {
3057 prototype = fun->instance_prototype();
3058 } else {
John Reck59135872010-11-02 12:39:01 -07003059 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3060 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3061 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003062 }
3063 Map* map = Map::cast(map_obj);
3064 map->set_inobject_properties(in_object_properties);
3065 map->set_unused_property_fields(in_object_properties);
3066 map->set_prototype(prototype);
Steve Block8defd9f2010-07-08 12:39:36 +01003067 ASSERT(map->has_fast_elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003068
Andrei Popescu402d9372010-02-26 13:31:12 +00003069 // If the function has only simple this property assignments add
3070 // field descriptors for these to the initial map as the object
3071 // cannot be constructed without having these properties. Guard by
3072 // the inline_new flag so we only change the map if we generate a
3073 // specialized construct stub.
Steve Blocka7e24c12009-10-30 11:49:00 +00003074 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Andrei Popescu402d9372010-02-26 13:31:12 +00003075 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003076 int count = fun->shared()->this_property_assignments_count();
3077 if (count > in_object_properties) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003078 // Inline constructor can only handle inobject properties.
3079 fun->shared()->ForbidInlineConstructor();
3080 } else {
John Reck59135872010-11-02 12:39:01 -07003081 Object* descriptors_obj;
3082 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3083 if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
3084 return maybe_descriptors_obj;
3085 }
3086 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003087 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
3088 for (int i = 0; i < count; i++) {
3089 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3090 ASSERT(name->IsSymbol());
3091 FieldDescriptor field(name, i, NONE);
3092 field.SetEnumerationIndex(i);
3093 descriptors->Set(i, &field);
3094 }
3095 descriptors->SetNextEnumerationIndex(count);
3096 descriptors->SortUnchecked();
3097
3098 // The descriptors may contain duplicates because the compiler does not
3099 // guarantee the uniqueness of property names (it would have required
3100 // quadratic time). Once the descriptors are sorted we can check for
3101 // duplicates in linear time.
3102 if (HasDuplicates(descriptors)) {
3103 fun->shared()->ForbidInlineConstructor();
3104 } else {
3105 map->set_instance_descriptors(descriptors);
3106 map->set_pre_allocated_property_fields(count);
3107 map->set_unused_property_fields(in_object_properties - count);
3108 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003109 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003110 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003111
3112 fun->shared()->StartInobjectSlackTracking(map);
3113
Steve Blocka7e24c12009-10-30 11:49:00 +00003114 return map;
3115}
3116
3117
3118void Heap::InitializeJSObjectFromMap(JSObject* obj,
3119 FixedArray* properties,
3120 Map* map) {
3121 obj->set_properties(properties);
3122 obj->initialize_elements();
3123 // TODO(1240798): Initialize the object's body using valid initial values
3124 // according to the object's initial map. For example, if the map's
3125 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3126 // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
3127 // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
3128 // verification code has to cope with (temporarily) invalid objects. See
3129  // for example, JSArray::JSArrayVerify.
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003130 Object* filler;
3131 // We cannot always fill with one_pointer_filler_map because objects
3132 // created from API functions expect their internal fields to be initialized
3133 // with undefined_value.
3134 if (map->constructor()->IsJSFunction() &&
3135 JSFunction::cast(map->constructor())->shared()->
3136 IsInobjectSlackTrackingInProgress()) {
3137 // We might want to shrink the object later.
3138 ASSERT(obj->GetInternalFieldCount() == 0);
3139 filler = Heap::one_pointer_filler_map();
3140 } else {
3141 filler = Heap::undefined_value();
3142 }
3143 obj->InitializeBody(map->instance_size(), filler);
Steve Blocka7e24c12009-10-30 11:49:00 +00003144}
3145
3146
John Reck59135872010-11-02 12:39:01 -07003147MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003148 // JSFunctions should be allocated using AllocateFunction to be
3149 // properly initialized.
3150 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3151
Steve Block8defd9f2010-07-08 12:39:36 +01003152 // Both types of global objects should be allocated using
3153 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00003154 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3155 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3156
3157 // Allocate the backing storage for the properties.
3158 int prop_size =
3159 map->pre_allocated_property_fields() +
3160 map->unused_property_fields() -
3161 map->inobject_properties();
3162 ASSERT(prop_size >= 0);
John Reck59135872010-11-02 12:39:01 -07003163 Object* properties;
3164 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3165 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3166 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003167
3168 // Allocate the JSObject.
3169 AllocationSpace space =
3170 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3171 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
John Reck59135872010-11-02 12:39:01 -07003172 Object* obj;
3173 { MaybeObject* maybe_obj = Allocate(map, space);
3174 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3175 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003176
3177 // Initialize the JSObject.
3178 InitializeJSObjectFromMap(JSObject::cast(obj),
3179 FixedArray::cast(properties),
3180 map);
Steve Block8defd9f2010-07-08 12:39:36 +01003181 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003182 return obj;
3183}
3184
3185
John Reck59135872010-11-02 12:39:01 -07003186MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3187 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003188 // Allocate the initial map if absent.
3189 if (!constructor->has_initial_map()) {
John Reck59135872010-11-02 12:39:01 -07003190 Object* initial_map;
3191 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3192 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3193 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003194 constructor->set_initial_map(Map::cast(initial_map));
3195 Map::cast(initial_map)->set_constructor(constructor);
3196 }
3197  // Allocate the object based on the constructor's initial map.
John Reck59135872010-11-02 12:39:01 -07003198 MaybeObject* result =
Steve Blocka7e24c12009-10-30 11:49:00 +00003199 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
John Reck59135872010-11-02 12:39:01 -07003200#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00003201 // Make sure result is NOT a global object if valid.
John Reck59135872010-11-02 12:39:01 -07003202 Object* non_failure;
3203 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3204#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003205 return result;
3206}
3207
3208
John Reck59135872010-11-02 12:39:01 -07003209MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003210 ASSERT(constructor->has_initial_map());
3211 Map* map = constructor->initial_map();
3212
3213 // Make sure no field properties are described in the initial map.
3214 // This guarantees us that normalizing the properties does not
3215 // require us to change property values to JSGlobalPropertyCells.
3216 ASSERT(map->NextFreePropertyIndex() == 0);
3217
3218 // Make sure we don't have a ton of pre-allocated slots in the
3219 // global objects. They will be unused once we normalize the object.
3220 ASSERT(map->unused_property_fields() == 0);
3221 ASSERT(map->inobject_properties() == 0);
3222
3223 // Initial size of the backing store to avoid resize of the storage during
3224  // bootstrapping. The size differs between the JS global object and the
3225 // builtins object.
3226 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3227
3228 // Allocate a dictionary object for backing storage.
John Reck59135872010-11-02 12:39:01 -07003229 Object* obj;
3230 { MaybeObject* maybe_obj =
3231 StringDictionary::Allocate(
3232 map->NumberOfDescribedProperties() * 2 + initial_size);
3233 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3234 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003235 StringDictionary* dictionary = StringDictionary::cast(obj);
3236
3237 // The global object might be created from an object template with accessors.
3238 // Fill these accessors into the dictionary.
3239 DescriptorArray* descs = map->instance_descriptors();
3240 for (int i = 0; i < descs->number_of_descriptors(); i++) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003241 PropertyDetails details(descs->GetDetails(i));
Steve Blocka7e24c12009-10-30 11:49:00 +00003242 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3243 PropertyDetails d =
3244 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3245 Object* value = descs->GetCallbacksObject(i);
Steve Block44f0eee2011-05-26 01:26:41 +01003246 { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
John Reck59135872010-11-02 12:39:01 -07003247 if (!maybe_value->ToObject(&value)) return maybe_value;
3248 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003249
John Reck59135872010-11-02 12:39:01 -07003250 Object* result;
3251 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3252 if (!maybe_result->ToObject(&result)) return maybe_result;
3253 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003254 dictionary = StringDictionary::cast(result);
3255 }
3256
3257 // Allocate the global object and initialize it with the backing store.
John Reck59135872010-11-02 12:39:01 -07003258 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3259 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3260 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003261 JSObject* global = JSObject::cast(obj);
3262 InitializeJSObjectFromMap(global, dictionary, map);
3263
3264 // Create a new map for the global object.
John Reck59135872010-11-02 12:39:01 -07003265 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3266 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3267 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003268 Map* new_map = Map::cast(obj);
3269
3270  // Set up the global object as a normalized object.
3271 global->set_map(new_map);
Steve Block44f0eee2011-05-26 01:26:41 +01003272 global->map()->set_instance_descriptors(empty_descriptor_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00003273 global->set_properties(dictionary);
3274
3275 // Make sure result is a global object with properties in dictionary.
3276 ASSERT(global->IsGlobalObject());
3277 ASSERT(!global->HasFastProperties());
3278 return global;
3279}
3280
3281
John Reck59135872010-11-02 12:39:01 -07003282MaybeObject* Heap::CopyJSObject(JSObject* source) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003283 // Never used to copy functions. If functions need to be copied we
3284 // have to be careful to clear the literals array.
3285 ASSERT(!source->IsJSFunction());
3286
3287 // Make the clone.
3288 Map* map = source->map();
3289 int object_size = map->instance_size();
3290 Object* clone;
3291
3292 // If we're forced to always allocate, we use the general allocation
3293 // functions which may leave us with an object in old space.
3294 if (always_allocate()) {
John Reck59135872010-11-02 12:39:01 -07003295 { MaybeObject* maybe_clone =
3296 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3297 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3298 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003299 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003300 CopyBlock(clone_address,
3301 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003302 object_size);
3303 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01003304 RecordWrites(clone_address,
3305 JSObject::kHeaderSize,
3306 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003307 } else {
John Reck59135872010-11-02 12:39:01 -07003308 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3309 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3310 }
Steve Block44f0eee2011-05-26 01:26:41 +01003311 ASSERT(InNewSpace(clone));
Steve Blocka7e24c12009-10-30 11:49:00 +00003312 // Since we know the clone is allocated in new space, we can copy
3313 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003314 CopyBlock(HeapObject::cast(clone)->address(),
3315 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003316 object_size);
3317 }
3318
3319 FixedArray* elements = FixedArray::cast(source->elements());
3320 FixedArray* properties = FixedArray::cast(source->properties());
3321 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01003322 if (elements->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003323 Object* elem;
3324 { MaybeObject* maybe_elem =
3325 (elements->map() == fixed_cow_array_map()) ?
3326 elements : CopyFixedArray(elements);
3327 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3328 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003329 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
3330 }
3331 // Update properties if necessary.
3332 if (properties->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003333 Object* prop;
3334 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3335 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3336 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003337 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3338 }
3339 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00003340#ifdef ENABLE_LOGGING_AND_PROFILING
Steve Block44f0eee2011-05-26 01:26:41 +01003341 isolate_->producer_heap_profile()->RecordJSObjectAllocation(clone);
Steve Block3ce2e202009-11-05 08:53:23 +00003342#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003343 return clone;
3344}
3345
3346
John Reck59135872010-11-02 12:39:01 -07003347MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3348 JSGlobalProxy* object) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003349 ASSERT(constructor->has_initial_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003350 Map* map = constructor->initial_map();
3351
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003352 // Check that the already allocated object has the same size and type as
Steve Blocka7e24c12009-10-30 11:49:00 +00003353 // objects allocated using the constructor.
3354 ASSERT(map->instance_size() == object->map()->instance_size());
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003355 ASSERT(map->instance_type() == object->map()->instance_type());
Steve Blocka7e24c12009-10-30 11:49:00 +00003356
3357 // Allocate the backing storage for the properties.
3358 int prop_size = map->unused_property_fields() - map->inobject_properties();
John Reck59135872010-11-02 12:39:01 -07003359 Object* properties;
3360 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3361 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3362 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003363
3364 // Reset the map for the object.
3365 object->set_map(constructor->initial_map());
3366
3367 // Reinitialize the object from the constructor map.
3368 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3369 return object;
3370}
3371
3372
John Reck59135872010-11-02 12:39:01 -07003373MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
3374 PretenureFlag pretenure) {
3375 Object* result;
3376 { MaybeObject* maybe_result =
3377 AllocateRawAsciiString(string.length(), pretenure);
3378 if (!maybe_result->ToObject(&result)) return maybe_result;
3379 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003380
3381 // Copy the characters into the new object.
3382 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3383 for (int i = 0; i < string.length(); i++) {
3384 string_result->SeqAsciiStringSet(i, string[i]);
3385 }
3386 return result;
3387}
3388
3389
Steve Block9fac8402011-05-12 15:51:54 +01003390MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
3391 PretenureFlag pretenure) {
Leon Clarkeac952652010-07-15 11:15:24 +01003392 // V8 only supports characters in the Basic Multilingual Plane.
3393 const uc32 kMaxSupportedChar = 0xFFFF;
Steve Blocka7e24c12009-10-30 11:49:00 +00003394 // Count the number of characters in the UTF-8 string and check if
3395 // it is an ASCII string.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003396 Access<UnicodeCache::Utf8Decoder>
3397 decoder(isolate_->unicode_cache()->utf8_decoder());
Steve Blocka7e24c12009-10-30 11:49:00 +00003398 decoder->Reset(string.start(), string.length());
3399 int chars = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003400 while (decoder->has_more()) {
Steve Block9fac8402011-05-12 15:51:54 +01003401 decoder->GetNext();
Steve Blocka7e24c12009-10-30 11:49:00 +00003402 chars++;
3403 }
3404
John Reck59135872010-11-02 12:39:01 -07003405 Object* result;
3406 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3407 if (!maybe_result->ToObject(&result)) return maybe_result;
3408 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003409
3410 // Convert and copy the characters into the new object.
3411 String* string_result = String::cast(result);
3412 decoder->Reset(string.start(), string.length());
3413 for (int i = 0; i < chars; i++) {
3414 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003415 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003416 string_result->Set(i, r);
3417 }
3418 return result;
3419}
3420
3421
John Reck59135872010-11-02 12:39:01 -07003422MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3423 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003424 // Check if the string is an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003425 MaybeObject* maybe_result;
Steve Block9fac8402011-05-12 15:51:54 +01003426 if (String::IsAscii(string.start(), string.length())) {
John Reck59135872010-11-02 12:39:01 -07003427 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003428 } else { // It's not an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003429 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003430 }
John Reck59135872010-11-02 12:39:01 -07003431 Object* result;
3432 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003433
3434 // Copy the characters into the new object, which may be either ASCII or
3435 // UTF-16.
3436 String* string_result = String::cast(result);
3437 for (int i = 0; i < string.length(); i++) {
3438 string_result->Set(i, string[i]);
3439 }
3440 return result;
3441}
3442
3443
3444Map* Heap::SymbolMapForString(String* string) {
3445 // If the string is in new space it cannot be used as a symbol.
3446 if (InNewSpace(string)) return NULL;
3447
3448 // Find the corresponding symbol map for strings.
3449 Map* map = string->map();
Steve Block44f0eee2011-05-26 01:26:41 +01003450 if (map == ascii_string_map()) {
3451 return ascii_symbol_map();
3452 }
3453 if (map == string_map()) {
3454 return symbol_map();
3455 }
3456 if (map == cons_string_map()) {
3457 return cons_symbol_map();
3458 }
3459 if (map == cons_ascii_string_map()) {
3460 return cons_ascii_symbol_map();
3461 }
3462 if (map == external_string_map()) {
3463 return external_symbol_map();
3464 }
3465 if (map == external_ascii_string_map()) {
3466 return external_ascii_symbol_map();
3467 }
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01003468 if (map == external_string_with_ascii_data_map()) {
3469 return external_symbol_with_ascii_data_map();
3470 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003471
3472 // No match found.
3473 return NULL;
3474}
3475
3476
John Reck59135872010-11-02 12:39:01 -07003477MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3478 int chars,
3479 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00003480 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003481  // Ensure that chars matches the number of characters in the buffer.
3482 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3483 // Determine whether the string is ascii.
3484 bool is_ascii = true;
Leon Clarkee46be812010-01-19 14:06:41 +00003485 while (buffer->has_more()) {
3486 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3487 is_ascii = false;
3488 break;
3489 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003490 }
3491 buffer->Rewind();
3492
3493 // Compute map and object size.
3494 int size;
3495 Map* map;
3496
3497 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00003498 if (chars > SeqAsciiString::kMaxLength) {
3499 return Failure::OutOfMemoryException();
3500 }
Steve Blockd0582a62009-12-15 09:54:21 +00003501 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003502 size = SeqAsciiString::SizeFor(chars);
3503 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00003504 if (chars > SeqTwoByteString::kMaxLength) {
3505 return Failure::OutOfMemoryException();
3506 }
Steve Blockd0582a62009-12-15 09:54:21 +00003507 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003508 size = SeqTwoByteString::SizeFor(chars);
3509 }
3510
3511 // Allocate string.
John Reck59135872010-11-02 12:39:01 -07003512 Object* result;
3513 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3514 ? lo_space_->AllocateRaw(size)
3515 : old_data_space_->AllocateRaw(size);
3516 if (!maybe_result->ToObject(&result)) return maybe_result;
3517 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003518
3519 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00003520 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00003521 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00003522 answer->set_length(chars);
3523 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00003524
3525 ASSERT_EQ(size, answer->Size());
3526
3527 // Fill in the characters.
3528 for (int i = 0; i < chars; i++) {
3529 answer->Set(i, buffer->GetNext());
3530 }
3531 return answer;
3532}
3533
3534
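// The raw string allocators below choose the target space from the
// pretenure flag and the object size: new space by default, old data space
// when tenured, and large object space for oversized strings.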
John Reck59135872010-11-02 12:39:01 -07003535MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003536 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3537 return Failure::OutOfMemoryException();
3538 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003539
3540 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003541 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003542
Leon Clarkee46be812010-01-19 14:06:41 +00003543 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3544 AllocationSpace retry_space = OLD_DATA_SPACE;
3545
Steve Blocka7e24c12009-10-30 11:49:00 +00003546 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00003547 if (size > kMaxObjectSizeInNewSpace) {
3548 // Allocate in large object space, retry space will be ignored.
3549 space = LO_SPACE;
3550 } else if (size > MaxObjectSizeInPagedSpace()) {
3551 // Allocate in new space, retry in large object space.
3552 retry_space = LO_SPACE;
3553 }
3554 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3555 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003556 }
John Reck59135872010-11-02 12:39:01 -07003557 Object* result;
3558 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3559 if (!maybe_result->ToObject(&result)) return maybe_result;
3560 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003561
Steve Blocka7e24c12009-10-30 11:49:00 +00003562 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003563 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003564 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003565 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003566 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3567 return result;
3568}
3569
3570
John Reck59135872010-11-02 12:39:01 -07003571MaybeObject* Heap::AllocateRawTwoByteString(int length,
3572 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003573 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3574 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00003575 }
Leon Clarkee46be812010-01-19 14:06:41 +00003576 int size = SeqTwoByteString::SizeFor(length);
3577 ASSERT(size <= SeqTwoByteString::kMaxSize);
3578 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3579 AllocationSpace retry_space = OLD_DATA_SPACE;
3580
3581 if (space == NEW_SPACE) {
3582 if (size > kMaxObjectSizeInNewSpace) {
3583 // Allocate in large object space, retry space will be ignored.
3584 space = LO_SPACE;
3585 } else if (size > MaxObjectSizeInPagedSpace()) {
3586 // Allocate in new space, retry in large object space.
3587 retry_space = LO_SPACE;
3588 }
3589 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3590 space = LO_SPACE;
3591 }
John Reck59135872010-11-02 12:39:01 -07003592 Object* result;
3593 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3594 if (!maybe_result->ToObject(&result)) return maybe_result;
3595 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003596
Steve Blocka7e24c12009-10-30 11:49:00 +00003597 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003598 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003599 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003600 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003601 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3602 return result;
3603}
3604
3605
John Reck59135872010-11-02 12:39:01 -07003606MaybeObject* Heap::AllocateEmptyFixedArray() {
Steve Blocka7e24c12009-10-30 11:49:00 +00003607 int size = FixedArray::SizeFor(0);
John Reck59135872010-11-02 12:39:01 -07003608 Object* result;
3609 { MaybeObject* maybe_result =
3610 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3611 if (!maybe_result->ToObject(&result)) return maybe_result;
3612 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003613 // Initialize the object.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003614 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3615 reinterpret_cast<FixedArray*>(result)->set_length(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003616 return result;
3617}
3618
3619
John Reck59135872010-11-02 12:39:01 -07003620MaybeObject* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00003621 if (length < 0 || length > FixedArray::kMaxLength) {
3622 return Failure::OutOfMemoryException();
3623 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003624 ASSERT(length > 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003625 // Use the general function if we're forced to always allocate.
3626 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3627 // Allocate the raw data for a fixed array.
3628 int size = FixedArray::SizeFor(length);
3629 return size <= kMaxObjectSizeInNewSpace
3630 ? new_space_.AllocateRaw(size)
3631 : lo_space_->AllocateRawFixedArray(size);
3632}
3633
3634
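// Copies a fixed array and gives the copy the given map. If the copy ends up
// in new space its body is copied as a raw block; otherwise the elements are
// copied one at a time so the proper write barrier is applied.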
John Reck59135872010-11-02 12:39:01 -07003635MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003636 int len = src->length();
John Reck59135872010-11-02 12:39:01 -07003637 Object* obj;
3638 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3639 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3640 }
Steve Block44f0eee2011-05-26 01:26:41 +01003641 if (InNewSpace(obj)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003642 HeapObject* dst = HeapObject::cast(obj);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003643 dst->set_map(map);
3644 CopyBlock(dst->address() + kPointerSize,
3645 src->address() + kPointerSize,
3646 FixedArray::SizeFor(len) - kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003647 return obj;
3648 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003649 HeapObject::cast(obj)->set_map(map);
Steve Blocka7e24c12009-10-30 11:49:00 +00003650 FixedArray* result = FixedArray::cast(obj);
3651 result->set_length(len);
Leon Clarke4515c472010-02-03 11:58:03 +00003652
Steve Blocka7e24c12009-10-30 11:49:00 +00003653 // Copy the content
Leon Clarke4515c472010-02-03 11:58:03 +00003654 AssertNoAllocation no_gc;
3655 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00003656 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3657 return result;
3658}
3659
3660
John Reck59135872010-11-02 12:39:01 -07003661MaybeObject* Heap::AllocateFixedArray(int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003662 ASSERT(length >= 0);
3663 if (length == 0) return empty_fixed_array();
John Reck59135872010-11-02 12:39:01 -07003664 Object* result;
3665 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
3666 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003667 }
John Reck59135872010-11-02 12:39:01 -07003668 // Initialize header.
3669 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3670 array->set_map(fixed_array_map());
3671 array->set_length(length);
3672 // Initialize body.
Steve Block44f0eee2011-05-26 01:26:41 +01003673 ASSERT(!InNewSpace(undefined_value()));
John Reck59135872010-11-02 12:39:01 -07003674 MemsetPointer(array->data_start(), undefined_value(), length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003675 return result;
3676}
3677
3678
John Reck59135872010-11-02 12:39:01 -07003679MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003680 if (length < 0 || length > FixedArray::kMaxLength) {
3681 return Failure::OutOfMemoryException();
3682 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003683
Leon Clarkee46be812010-01-19 14:06:41 +00003684 AllocationSpace space =
3685 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003686 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003687 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3688 // Too big for new space.
3689 space = LO_SPACE;
3690 } else if (space == OLD_POINTER_SPACE &&
3691 size > MaxObjectSizeInPagedSpace()) {
3692 // Too big for old pointer space.
3693 space = LO_SPACE;
3694 }
3695
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003696 AllocationSpace retry_space =
3697 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3698
3699 return AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003700}
3701
3702
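// Shared helper for AllocateFixedArray(length, pretenure) and
// AllocateFixedArrayWithHoles. The filler must not live in new space, so the
// body can be initialized without write barriers.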
John Reck59135872010-11-02 12:39:01 -07003703MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
Steve Block44f0eee2011-05-26 01:26:41 +01003704 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07003705 int length,
3706 PretenureFlag pretenure,
3707 Object* filler) {
Steve Block6ded16b2010-05-10 14:33:55 +01003708 ASSERT(length >= 0);
Steve Block44f0eee2011-05-26 01:26:41 +01003709 ASSERT(heap->empty_fixed_array()->IsFixedArray());
3710 if (length == 0) return heap->empty_fixed_array();
Steve Block6ded16b2010-05-10 14:33:55 +01003711
Steve Block44f0eee2011-05-26 01:26:41 +01003712 ASSERT(!heap->InNewSpace(filler));
John Reck59135872010-11-02 12:39:01 -07003713 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003714 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
John Reck59135872010-11-02 12:39:01 -07003715 if (!maybe_result->ToObject(&result)) return maybe_result;
3716 }
Steve Block6ded16b2010-05-10 14:33:55 +01003717
Steve Block44f0eee2011-05-26 01:26:41 +01003718 HeapObject::cast(result)->set_map(heap->fixed_array_map());
Steve Block6ded16b2010-05-10 14:33:55 +01003719 FixedArray* array = FixedArray::cast(result);
3720 array->set_length(length);
3721 MemsetPointer(array->data_start(), filler, length);
3722 return array;
3723}
3724
3725
John Reck59135872010-11-02 12:39:01 -07003726MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
Steve Block44f0eee2011-05-26 01:26:41 +01003727 return AllocateFixedArrayWithFiller(this,
3728 length,
3729 pretenure,
3730 undefined_value());
Steve Block6ded16b2010-05-10 14:33:55 +01003731}
3732
3733
John Reck59135872010-11-02 12:39:01 -07003734MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
3735 PretenureFlag pretenure) {
Steve Block44f0eee2011-05-26 01:26:41 +01003736 return AllocateFixedArrayWithFiller(this,
3737 length,
3738 pretenure,
3739 the_hole_value());
Steve Block6ded16b2010-05-10 14:33:55 +01003740}
3741
3742
John Reck59135872010-11-02 12:39:01 -07003743MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
Steve Block6ded16b2010-05-10 14:33:55 +01003744 if (length == 0) return empty_fixed_array();
3745
John Reck59135872010-11-02 12:39:01 -07003746 Object* obj;
3747 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
3748 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3749 }
Steve Block6ded16b2010-05-10 14:33:55 +01003750
3751 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3752 FixedArray::cast(obj)->set_length(length);
3753 return obj;
3754}
3755
3756
John Reck59135872010-11-02 12:39:01 -07003757MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3758 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003759 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
John Reck59135872010-11-02 12:39:01 -07003760 if (!maybe_result->ToObject(&result)) return maybe_result;
3761 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003762 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003763 ASSERT(result->IsHashTable());
3764 return result;
3765}
3766
3767
John Reck59135872010-11-02 12:39:01 -07003768MaybeObject* Heap::AllocateGlobalContext() {
3769 Object* result;
3770 { MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01003771 AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003772 if (!maybe_result->ToObject(&result)) return maybe_result;
3773 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003774 Context* context = reinterpret_cast<Context*>(result);
3775 context->set_map(global_context_map());
3776 ASSERT(context->IsGlobalContext());
3777 ASSERT(result->IsContext());
3778 return result;
3779}
3780
3781
John Reck59135872010-11-02 12:39:01 -07003782MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003783 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003784 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003785 { MaybeObject* maybe_result = AllocateFixedArray(length);
John Reck59135872010-11-02 12:39:01 -07003786 if (!maybe_result->ToObject(&result)) return maybe_result;
3787 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003788 Context* context = reinterpret_cast<Context*>(result);
3789 context->set_map(context_map());
3790 context->set_closure(function);
3791 context->set_fcontext(context);
3792 context->set_previous(NULL);
3793 context->set_extension(NULL);
3794 context->set_global(function->context()->global());
3795 ASSERT(!context->IsGlobalContext());
3796 ASSERT(context->is_function_context());
3797 ASSERT(result->IsContext());
3798 return result;
3799}
3800
3801
John Reck59135872010-11-02 12:39:01 -07003802MaybeObject* Heap::AllocateWithContext(Context* previous,
3803 JSObject* extension,
3804 bool is_catch_context) {
3805 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003806 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003807 if (!maybe_result->ToObject(&result)) return maybe_result;
3808 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003809 Context* context = reinterpret_cast<Context*>(result);
Steve Block44f0eee2011-05-26 01:26:41 +01003810 context->set_map(is_catch_context ? catch_context_map() :
3811 context_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003812 context->set_closure(previous->closure());
3813 context->set_fcontext(previous->fcontext());
3814 context->set_previous(previous);
3815 context->set_extension(extension);
3816 context->set_global(previous->global());
3817 ASSERT(!context->IsGlobalContext());
3818 ASSERT(!context->is_function_context());
3819 ASSERT(result->IsContext());
3820 return result;
3821}
3822
3823
John Reck59135872010-11-02 12:39:01 -07003824MaybeObject* Heap::AllocateStruct(InstanceType type) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003825 Map* map;
3826 switch (type) {
Steve Block44f0eee2011-05-26 01:26:41 +01003827#define MAKE_CASE(NAME, Name, name) \
3828 case NAME##_TYPE: map = name##_map(); break;
Steve Blocka7e24c12009-10-30 11:49:00 +00003829STRUCT_LIST(MAKE_CASE)
3830#undef MAKE_CASE
3831 default:
3832 UNREACHABLE();
3833 return Failure::InternalError();
3834 }
3835 int size = map->instance_size();
3836 AllocationSpace space =
3837 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
John Reck59135872010-11-02 12:39:01 -07003838 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01003839 { MaybeObject* maybe_result = Allocate(map, space);
John Reck59135872010-11-02 12:39:01 -07003840 if (!maybe_result->ToObject(&result)) return maybe_result;
3841 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003842 Struct::cast(result)->InitializeBody(size);
3843 return result;
3844}
3845
3846
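// Performs increasingly aggressive garbage collections as consecutive idle
// notifications arrive: a scavenge after kIdlesBeforeScavenge notifications,
// a mark-sweep (preceded by clearing the compilation cache) after
// kIdlesBeforeMarkSweep, and a compacting collection after
// kIdlesBeforeMarkCompact, at which point the caller is told cleanup has
// finished.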
3847bool Heap::IdleNotification() {
3848 static const int kIdlesBeforeScavenge = 4;
3849 static const int kIdlesBeforeMarkSweep = 7;
3850 static const int kIdlesBeforeMarkCompact = 8;
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003851 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
Ben Murdoche0cee9b2011-05-25 10:26:03 +01003852 static const unsigned int kGCsBetweenCleanup = 4;
Steve Block44f0eee2011-05-26 01:26:41 +01003853
3854 if (!last_idle_notification_gc_count_init_) {
3855 last_idle_notification_gc_count_ = gc_count_;
3856 last_idle_notification_gc_count_init_ = true;
3857 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003858
Steve Block6ded16b2010-05-10 14:33:55 +01003859 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003860 bool finished = false;
3861
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003862 // Reset the number of idle notifications received when a number of
3863 // GCs have taken place. This allows another round of cleanup based
3864 // on idle notifications if enough work has been carried out to
3865 // provoke a number of garbage collections.
Steve Block44f0eee2011-05-26 01:26:41 +01003866 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
3867 number_idle_notifications_ =
3868 Min(number_idle_notifications_ + 1, kMaxIdleCount);
Steve Blocka7e24c12009-10-30 11:49:00 +00003869 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01003870 number_idle_notifications_ = 0;
3871 last_idle_notification_gc_count_ = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003872 }
3873
Steve Block44f0eee2011-05-26 01:26:41 +01003874 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003875 if (contexts_disposed_ > 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01003876 HistogramTimerScope scope(isolate_->counters()->gc_context());
Steve Block6ded16b2010-05-10 14:33:55 +01003877 CollectAllGarbage(false);
3878 } else {
Ben Murdochf87a2032010-10-22 12:50:53 +01003879 CollectGarbage(NEW_SPACE);
Steve Block6ded16b2010-05-10 14:33:55 +01003880 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003881 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003882 last_idle_notification_gc_count_ = gc_count_;
3883 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003884 // Before doing the mark-sweep collections we clear the
3885 // compilation cache to avoid hanging on to source code and
3886 // generated code for cached functions.
Steve Block44f0eee2011-05-26 01:26:41 +01003887 isolate_->compilation_cache()->Clear();
Steve Blockd0582a62009-12-15 09:54:21 +00003888
Steve Blocka7e24c12009-10-30 11:49:00 +00003889 CollectAllGarbage(false);
3890 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003891 last_idle_notification_gc_count_ = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003892
Steve Block44f0eee2011-05-26 01:26:41 +01003893 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003894 CollectAllGarbage(true);
3895 new_space_.Shrink();
Steve Block44f0eee2011-05-26 01:26:41 +01003896 last_idle_notification_gc_count_ = gc_count_;
3897 number_idle_notifications_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003898 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003899 } else if (contexts_disposed_ > 0) {
3900 if (FLAG_expose_gc) {
3901 contexts_disposed_ = 0;
3902 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01003903 HistogramTimerScope scope(isolate_->counters()->gc_context());
Steve Block6ded16b2010-05-10 14:33:55 +01003904 CollectAllGarbage(false);
Steve Block44f0eee2011-05-26 01:26:41 +01003905 last_idle_notification_gc_count_ = gc_count_;
Steve Block6ded16b2010-05-10 14:33:55 +01003906 }
3907 // If this is the first idle notification, we reset the
3908 // notification count to avoid letting idle notifications for
3909 // context disposal garbage collections start a potentially too
3910 // aggressive idle GC cycle.
Steve Block44f0eee2011-05-26 01:26:41 +01003911 if (number_idle_notifications_ <= 1) {
3912 number_idle_notifications_ = 0;
Steve Block6ded16b2010-05-10 14:33:55 +01003913 uncommit = false;
3914 }
Steve Block44f0eee2011-05-26 01:26:41 +01003915 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003916 // If we have received more than kIdlesBeforeMarkCompact idle
3917 // notifications we do not perform any cleanup because we don't
3918 // expect to gain much by doing so.
3919 finished = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003920 }
3921
Steve Block6ded16b2010-05-10 14:33:55 +01003922 // Make sure that we have no pending context disposals and
3923 // conditionally uncommit from space.
3924 ASSERT(contexts_disposed_ == 0);
Steve Block44f0eee2011-05-26 01:26:41 +01003925 if (uncommit) UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003926 return finished;
3927}
3928
3929
3930#ifdef DEBUG
3931
3932void Heap::Print() {
3933 if (!HasBeenSetup()) return;
Steve Block44f0eee2011-05-26 01:26:41 +01003934 isolate()->PrintStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00003935 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003936 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3937 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003938}
3939
3940
3941void Heap::ReportCodeStatistics(const char* title) {
3942 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3943 PagedSpace::ResetCodeStatistics();
3944 // We do not look for code in new space, map space, or old space. If code
3945 // somehow ends up in those spaces, we would miss it here.
3946 code_space_->CollectCodeStatistics();
3947 lo_space_->CollectCodeStatistics();
3948 PagedSpace::ReportCodeStatistics();
3949}
3950
3951
3952// This function expects that NewSpace's allocated objects histogram is
3953// populated (via a call to CollectStatistics or else as a side effect of a
3954// just-completed scavenge collection).
3955void Heap::ReportHeapStatistics(const char* title) {
3956 USE(title);
3957 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3958 title, gc_count_);
3959 PrintF("mark-compact GC : %d\n", mc_count_);
Ben Murdochf87a2032010-10-22 12:50:53 +01003960 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
3961 old_gen_promotion_limit_);
3962 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
3963 old_gen_allocation_limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003964
3965 PrintF("\n");
3966 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
Steve Block44f0eee2011-05-26 01:26:41 +01003967 isolate_->global_handles()->PrintStats();
Steve Blocka7e24c12009-10-30 11:49:00 +00003968 PrintF("\n");
3969
3970 PrintF("Heap statistics : ");
Steve Block44f0eee2011-05-26 01:26:41 +01003971 isolate_->memory_allocator()->ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00003972 PrintF("To space : ");
3973 new_space_.ReportStatistics();
3974 PrintF("Old pointer space : ");
3975 old_pointer_space_->ReportStatistics();
3976 PrintF("Old data space : ");
3977 old_data_space_->ReportStatistics();
3978 PrintF("Code space : ");
3979 code_space_->ReportStatistics();
3980 PrintF("Map space : ");
3981 map_space_->ReportStatistics();
3982 PrintF("Cell space : ");
3983 cell_space_->ReportStatistics();
3984 PrintF("Large object space : ");
3985 lo_space_->ReportStatistics();
3986 PrintF(">>>>>> ========================================= >>>>>>\n");
3987}
3988
3989#endif // DEBUG
3990
3991bool Heap::Contains(HeapObject* value) {
3992 return Contains(value->address());
3993}
3994
3995
3996bool Heap::Contains(Address addr) {
3997 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3998 return HasBeenSetup() &&
3999 (new_space_.ToSpaceContains(addr) ||
4000 old_pointer_space_->Contains(addr) ||
4001 old_data_space_->Contains(addr) ||
4002 code_space_->Contains(addr) ||
4003 map_space_->Contains(addr) ||
4004 cell_space_->Contains(addr) ||
4005 lo_space_->SlowContains(addr));
4006}
4007
4008
4009bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4010 return InSpace(value->address(), space);
4011}
4012
4013
4014bool Heap::InSpace(Address addr, AllocationSpace space) {
4015 if (OS::IsOutsideAllocatedSpace(addr)) return false;
4016 if (!HasBeenSetup()) return false;
4017
4018 switch (space) {
4019 case NEW_SPACE:
4020 return new_space_.ToSpaceContains(addr);
4021 case OLD_POINTER_SPACE:
4022 return old_pointer_space_->Contains(addr);
4023 case OLD_DATA_SPACE:
4024 return old_data_space_->Contains(addr);
4025 case CODE_SPACE:
4026 return code_space_->Contains(addr);
4027 case MAP_SPACE:
4028 return map_space_->Contains(addr);
4029 case CELL_SPACE:
4030 return cell_space_->Contains(addr);
4031 case LO_SPACE:
4032 return lo_space_->SlowContains(addr);
4033 }
4034
4035 return false;
4036}
4037
4038
4039#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004040static void DummyScavengePointer(HeapObject** p) {
4041}
4042
4043
4044static void VerifyPointersUnderWatermark(
4045 PagedSpace* space,
4046 DirtyRegionCallback visit_dirty_region) {
4047 PageIterator it(space, PageIterator::PAGES_IN_USE);
4048
4049 while (it.has_next()) {
4050 Page* page = it.next();
4051 Address start = page->ObjectAreaStart();
4052 Address end = page->AllocationWatermark();
4053
Steve Block44f0eee2011-05-26 01:26:41 +01004054 HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004055 start,
4056 end,
4057 visit_dirty_region,
4058 &DummyScavengePointer);
4059 }
4060}
4061
4062
4063static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
4064 LargeObjectIterator it(space);
4065 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
4066 if (object->IsFixedArray()) {
4067 Address slot_address = object->address();
4068 Address end = object->address() + object->Size();
4069
4070 while (slot_address < end) {
4071 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
4072 // When we are not in GC, the Heap::InNewSpace() predicate
4073 // checks that pointers which satisfy the predicate point into
4074 // the active semispace.
Steve Block44f0eee2011-05-26 01:26:41 +01004075 HEAP->InNewSpace(*slot);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004076 slot_address += kPointerSize;
4077 }
4078 }
4079 }
4080}
4081
4082
Steve Blocka7e24c12009-10-30 11:49:00 +00004083void Heap::Verify() {
4084 ASSERT(HasBeenSetup());
4085
4086 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00004087 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00004088
4089 new_space_.Verify();
4090
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004091 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
4092 old_pointer_space_->Verify(&dirty_regions_visitor);
4093 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004094
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004095 VerifyPointersUnderWatermark(old_pointer_space_,
4096 &IteratePointersInDirtyRegion);
4097 VerifyPointersUnderWatermark(map_space_,
4098 &IteratePointersInDirtyMapsRegion);
4099 VerifyPointersUnderWatermark(lo_space_);
4100
4101 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
4102 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
4103
4104 VerifyPointersVisitor no_dirty_regions_visitor;
4105 old_data_space_->Verify(&no_dirty_regions_visitor);
4106 code_space_->Verify(&no_dirty_regions_visitor);
4107 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004108
4109 lo_space_->Verify();
4110}
4111#endif // DEBUG
4112
4113
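// The LookupSymbol family interns a string in the symbol table. The lookup
// may grow the table, so the (possibly new) table is stored back into the
// roots array directly; set_symbol_table cannot be used for this.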
John Reck59135872010-11-02 12:39:01 -07004114MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004115 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004116 Object* new_table;
4117 { MaybeObject* maybe_new_table =
4118 symbol_table()->LookupSymbol(string, &symbol);
4119 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4120 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004121 // Can't use set_symbol_table because SymbolTable::cast knows that
4122 // SymbolTable is a singleton and checks for identity.
4123 roots_[kSymbolTableRootIndex] = new_table;
4124 ASSERT(symbol != NULL);
4125 return symbol;
4126}
4127
4128
Steve Block9fac8402011-05-12 15:51:54 +01004129MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
4130 Object* symbol = NULL;
4131 Object* new_table;
4132 { MaybeObject* maybe_new_table =
4133 symbol_table()->LookupAsciiSymbol(string, &symbol);
4134 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4135 }
4136 // Can't use set_symbol_table because SymbolTable::cast knows that
4137 // SymbolTable is a singleton and checks for identity.
4138 roots_[kSymbolTableRootIndex] = new_table;
4139 ASSERT(symbol != NULL);
4140 return symbol;
4141}
4142
4143
4144MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
4145 Object* symbol = NULL;
4146 Object* new_table;
4147 { MaybeObject* maybe_new_table =
4148 symbol_table()->LookupTwoByteSymbol(string, &symbol);
4149 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4150 }
4151 // Can't use set_symbol_table because SymbolTable::cast knows that
4152 // SymbolTable is a singleton and checks for identity.
4153 roots_[kSymbolTableRootIndex] = new_table;
4154 ASSERT(symbol != NULL);
4155 return symbol;
4156}
4157
4158
John Reck59135872010-11-02 12:39:01 -07004159MaybeObject* Heap::LookupSymbol(String* string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004160 if (string->IsSymbol()) return string;
4161 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004162 Object* new_table;
4163 { MaybeObject* maybe_new_table =
4164 symbol_table()->LookupString(string, &symbol);
4165 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4166 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004167 // Can't use set_symbol_table because SymbolTable::cast knows that
4168 // SymbolTable is a singleton and checks for identity.
4169 roots_[kSymbolTableRootIndex] = new_table;
4170 ASSERT(symbol != NULL);
4171 return symbol;
4172}
4173
4174
4175bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4176 if (string->IsSymbol()) {
4177 *symbol = string;
4178 return true;
4179 }
4180 return symbol_table()->LookupSymbolIfExists(string, symbol);
4181}
4182
4183
4184#ifdef DEBUG
4185void Heap::ZapFromSpace() {
Steve Block1e0659c2011-05-24 12:43:12 +01004186 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
Steve Blocka7e24c12009-10-30 11:49:00 +00004187 for (Address a = new_space_.FromSpaceLow();
4188 a < new_space_.FromSpaceHigh();
4189 a += kPointerSize) {
4190 Memory::Address_at(a) = kFromSpaceZapValue;
4191 }
4192}
4193#endif // DEBUG
4194
4195
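// Visits every pointer-sized slot in [start, end) and passes slots that
// point into new space to copy_object_func. Returns whether the region
// still contains pointers to new space afterwards, i.e. whether its dirty
// mark has to be kept.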
Steve Block44f0eee2011-05-26 01:26:41 +01004196bool Heap::IteratePointersInDirtyRegion(Heap* heap,
4197 Address start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004198 Address end,
4199 ObjectSlotCallback copy_object_func) {
4200 Address slot_address = start;
4201 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004202
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004203 while (slot_address < end) {
4204 Object** slot = reinterpret_cast<Object**>(slot_address);
Steve Block44f0eee2011-05-26 01:26:41 +01004205 if (heap->InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004206 ASSERT((*slot)->IsHeapObject());
4207 copy_object_func(reinterpret_cast<HeapObject**>(slot));
Steve Block44f0eee2011-05-26 01:26:41 +01004208 if (heap->InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004209 ASSERT((*slot)->IsHeapObject());
4210 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004211 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004212 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004213 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00004214 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004215 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00004216}
4217
4218
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004219// Compute the start address of the first map following the given addr.
4220static inline Address MapStartAlign(Address addr) {
4221 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4222 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4223}
Steve Blocka7e24c12009-10-30 11:49:00 +00004224
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004225
4226// Compute the end address of the first map preceding the given addr.
4227static inline Address MapEndAlign(Address addr) {
4228 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4229 return page + ((addr - page) / Map::kSize * Map::kSize);
4230}
4231
4232
4233static bool IteratePointersInDirtyMaps(Address start,
4234 Address end,
4235 ObjectSlotCallback copy_object_func) {
4236 ASSERT(MapStartAlign(start) == start);
4237 ASSERT(MapEndAlign(end) == end);
4238
4239 Address map_address = start;
4240 bool pointers_to_new_space_found = false;
4241
Steve Block44f0eee2011-05-26 01:26:41 +01004242 Heap* heap = HEAP;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004243 while (map_address < end) {
Steve Block44f0eee2011-05-26 01:26:41 +01004244 ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004245 ASSERT(Memory::Object_at(map_address)->IsMap());
4246
4247 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4248 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4249
Steve Block44f0eee2011-05-26 01:26:41 +01004250 if (Heap::IteratePointersInDirtyRegion(heap,
4251 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004252 pointer_fields_end,
4253 copy_object_func)) {
4254 pointers_to_new_space_found = true;
4255 }
4256
4257 map_address += Map::kSize;
4258 }
4259
4260 return pointers_to_new_space_found;
4261}
4262
4263
4264bool Heap::IteratePointersInDirtyMapsRegion(
Steve Block44f0eee2011-05-26 01:26:41 +01004265 Heap* heap,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004266 Address start,
4267 Address end,
4268 ObjectSlotCallback copy_object_func) {
4269 Address map_aligned_start = MapStartAlign(start);
4270 Address map_aligned_end = MapEndAlign(end);
4271
4272 bool contains_pointers_to_new_space = false;
4273
4274 if (map_aligned_start != start) {
4275 Address prev_map = map_aligned_start - Map::kSize;
4276 ASSERT(Memory::Object_at(prev_map)->IsMap());
4277
4278 Address pointer_fields_start =
4279 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4280
4281 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004282 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004283
4284 contains_pointers_to_new_space =
Steve Block44f0eee2011-05-26 01:26:41 +01004285 IteratePointersInDirtyRegion(heap,
4286 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004287 pointer_fields_end,
4288 copy_object_func)
4289 || contains_pointers_to_new_space;
4290 }
4291
4292 contains_pointers_to_new_space =
4293 IteratePointersInDirtyMaps(map_aligned_start,
4294 map_aligned_end,
4295 copy_object_func)
4296 || contains_pointers_to_new_space;
4297
4298 if (map_aligned_end != end) {
4299 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4300
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004301 Address pointer_fields_start =
4302 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004303
4304 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004305 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004306
4307 contains_pointers_to_new_space =
Steve Block44f0eee2011-05-26 01:26:41 +01004308 IteratePointersInDirtyRegion(heap,
4309 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004310 pointer_fields_end,
4311 copy_object_func)
4312 || contains_pointers_to_new_space;
4313 }
4314
4315 return contains_pointers_to_new_space;
4316}
4317
4318
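// Visits all slots in [start, end), forwarding pointers into from space via
// the callback, and re-records the region marks of the containing page for
// slots that still point into new space afterwards.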
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004319void Heap::IterateAndMarkPointersToFromSpace(Address start,
4320 Address end,
4321 ObjectSlotCallback callback) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004322 Address slot_address = start;
4323 Page* page = Page::FromAddress(start);
4324
4325 uint32_t marks = page->GetRegionMarks();
4326
4327 while (slot_address < end) {
4328 Object** slot = reinterpret_cast<Object**>(slot_address);
Steve Block44f0eee2011-05-26 01:26:41 +01004329 if (InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004330 ASSERT((*slot)->IsHeapObject());
4331 callback(reinterpret_cast<HeapObject**>(slot));
Steve Block44f0eee2011-05-26 01:26:41 +01004332 if (InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004333 ASSERT((*slot)->IsHeapObject());
4334 marks |= page->GetRegionMaskForAddress(slot_address);
4335 }
4336 }
4337 slot_address += kPointerSize;
4338 }
4339
4340 page->SetRegionMarks(marks);
4341}
4342
4343
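// Walks the region bitmap for [area_start, area_end), invoking
// visit_dirty_region for each region whose dirty bit is set, and returns
// the new bitmap in which a bit remains set only if the region still holds
// pointers to new space.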
4344uint32_t Heap::IterateDirtyRegions(
4345 uint32_t marks,
4346 Address area_start,
4347 Address area_end,
4348 DirtyRegionCallback visit_dirty_region,
4349 ObjectSlotCallback copy_object_func) {
4350 uint32_t newmarks = 0;
4351 uint32_t mask = 1;
4352
4353 if (area_start >= area_end) {
4354 return newmarks;
4355 }
4356
4357 Address region_start = area_start;
4358
4359 // area_start does not necessarily coincide with start of the first region.
4360 // Thus to calculate the beginning of the next region we have to align
4361 // area_start by Page::kRegionSize.
4362 Address second_region =
4363 reinterpret_cast<Address>(
4364 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4365 ~Page::kRegionAlignmentMask);
4366
4367 // Next region might be beyond area_end.
4368 Address region_end = Min(second_region, area_end);
4369
4370 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004371 if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004372 newmarks |= mask;
4373 }
4374 }
4375 mask <<= 1;
4376
4377 // Iterate subsequent regions which fully lay inside [area_start, area_end[.
4378 region_start = region_end;
4379 region_end = region_start + Page::kRegionSize;
4380
4381 while (region_end <= area_end) {
4382 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004383 if (visit_dirty_region(this,
4384 region_start,
4385 region_end,
4386 copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004387 newmarks |= mask;
4388 }
4389 }
4390
4391 region_start = region_end;
4392 region_end = region_start + Page::kRegionSize;
4393
4394 mask <<= 1;
4395 }
4396
4397 if (region_start != area_end) {
4398 // A small piece of the area is left not iterated because area_end does
4399 // not coincide with a region end. Check whether the region covering the
4400 // last part of the area is dirty.
4401 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004402 if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004403 newmarks |= mask;
4404 }
4405 }
4406 }
4407
4408 return newmarks;
4409}
4410
4411
4412
4413void Heap::IterateDirtyRegions(
4414 PagedSpace* space,
4415 DirtyRegionCallback visit_dirty_region,
4416 ObjectSlotCallback copy_object_func,
4417 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004418
4419 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004420
Steve Blocka7e24c12009-10-30 11:49:00 +00004421 while (it.has_next()) {
4422 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004423 uint32_t marks = page->GetRegionMarks();
4424
4425 if (marks != Page::kAllRegionsCleanMarks) {
4426 Address start = page->ObjectAreaStart();
4427
4428 // Do not try to visit pointers beyond the page allocation watermark.
4429 // The page can contain garbage pointers there.
4430 Address end;
4431
4432 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4433 page->IsWatermarkValid()) {
4434 end = page->AllocationWatermark();
4435 } else {
4436 end = page->CachedAllocationWatermark();
4437 }
4438
4439 ASSERT(space == old_pointer_space_ ||
4440 (space == map_space_ &&
4441 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4442
4443 page->SetRegionMarks(IterateDirtyRegions(marks,
4444 start,
4445 end,
4446 visit_dirty_region,
4447 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00004448 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004449
4450 // Mark page watermark as invalid to maintain watermark validity invariant.
4451 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4452 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00004453 }
4454}
4455
4456
Steve Blockd0582a62009-12-15 09:54:21 +00004457void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4458 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00004459 IterateWeakRoots(v, mode);
4460}
4461
4462
4463void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004464 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00004465 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00004466 if (mode != VISIT_ALL_IN_SCAVENGE) {
4467 // Scavenge collections have special processing for this.
Steve Block44f0eee2011-05-26 01:26:41 +01004468 external_string_table_.Iterate(v);
Leon Clarkee46be812010-01-19 14:06:41 +00004469 }
4470 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00004471}
4472
4473
Steve Blockd0582a62009-12-15 09:54:21 +00004474void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004475 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004476 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004477
Iain Merrick75681382010-08-19 15:07:18 +01004478 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004479 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004480
Steve Block44f0eee2011-05-26 01:26:41 +01004481 isolate_->bootstrapper()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004482 v->Synchronize("bootstrapper");
Steve Block44f0eee2011-05-26 01:26:41 +01004483 isolate_->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004484 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004485 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004486 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004487
4488#ifdef ENABLE_DEBUGGER_SUPPORT
Steve Block44f0eee2011-05-26 01:26:41 +01004489 isolate_->debug()->Iterate(v);
Steve Blocka7e24c12009-10-30 11:49:00 +00004490#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004491 v->Synchronize("debug");
Steve Block44f0eee2011-05-26 01:26:41 +01004492 isolate_->compilation_cache()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004493 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004494
4495 // Iterate over local handles in handle scopes.
Steve Block44f0eee2011-05-26 01:26:41 +01004496 isolate_->handle_scope_implementer()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004497 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004498
Leon Clarkee46be812010-01-19 14:06:41 +00004499 // Iterate over the builtin code objects and code stubs in the
4500 // heap. Note that it is not necessary to iterate over code objects
4501 // on scavenge collections.
4502 if (mode != VISIT_ALL_IN_SCAVENGE) {
Steve Block44f0eee2011-05-26 01:26:41 +01004503 isolate_->builtins()->IterateBuiltins(v);
Leon Clarkee46be812010-01-19 14:06:41 +00004504 }
Steve Blockd0582a62009-12-15 09:54:21 +00004505 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004506
4507 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004508 if (mode == VISIT_ONLY_STRONG) {
Steve Block44f0eee2011-05-26 01:26:41 +01004509 isolate_->global_handles()->IterateStrongRoots(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004510 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01004511 isolate_->global_handles()->IterateAllRoots(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004512 }
4513 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004514
4515 // Iterate over pointers being held by inactive threads.
Steve Block44f0eee2011-05-26 01:26:41 +01004516 isolate_->thread_manager()->Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004517 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004518
4519 // Iterate over the pointers the Serialization/Deserialization code is
4520 // holding.
4521 // During garbage collection this keeps the partial snapshot cache alive.
4522 // During deserialization of the startup snapshot this creates the partial
4523 // snapshot cache and deserializes the objects it refers to. During
4524 // serialization this does nothing, since the partial snapshot cache is
4525 // empty. However the next thing we do is create the partial snapshot,
4526 // filling up the partial snapshot cache with objects it needs as we go.
4527 SerializerDeserializer::Iterate(v);
4528 // We don't do a v->Synchronize call here, because in debug mode that will
4529 // output a flag to the snapshot. However at this point the serializer and
4530 // deserializer are deliberately a little unsynchronized (see above) so the
4531 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004532}
Steve Blocka7e24c12009-10-30 11:49:00 +00004533
4534
Steve Blocka7e24c12009-10-30 11:49:00 +00004535// TODO(1236194): Since the heap size is configurable on the command line
4536// and through the API, we should gracefully handle the case that the heap
4537// size is not big enough to fit all the initial objects.
Russell Brenner90bac252010-11-18 13:33:46 -08004538bool Heap::ConfigureHeap(int max_semispace_size,
4539 int max_old_gen_size,
4540 int max_executable_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004541 if (HasBeenSetup()) return false;
4542
Steve Block3ce2e202009-11-05 08:53:23 +00004543 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4544
4545 if (Snapshot::IsEnabled()) {
4546 // If we are using a snapshot we always reserve the default amount
4547 // of memory for each semispace because code in the snapshot has
4548 // write-barrier code that relies on the size and alignment of new
4549 // space. We therefore cannot use a larger max semispace size
4550 // than the default reserved semispace size.
4551 if (max_semispace_size_ > reserved_semispace_size_) {
4552 max_semispace_size_ = reserved_semispace_size_;
4553 }
4554 } else {
4555 // If we are not using snapshots we reserve space for the actual
4556 // max semispace size.
4557 reserved_semispace_size_ = max_semispace_size_;
4558 }
4559
4560 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Russell Brenner90bac252010-11-18 13:33:46 -08004561 if (max_executable_size > 0) {
4562 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
4563 }
4564
4565 // The max executable size must be less than or equal to the max old
4566 // generation size.
4567 if (max_executable_size_ > max_old_generation_size_) {
4568 max_executable_size_ = max_old_generation_size_;
4569 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004570
4571 // The new space size must be a power of two to support single-bit testing
4572 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00004573 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4574 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4575 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4576 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00004577
4578 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004579 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004580
Steve Block44f0eee2011-05-26 01:26:41 +01004581 configured_ = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004582 return true;
4583}
4584
4585
4586bool Heap::ConfigureHeapDefault() {
Russell Brenner90bac252010-11-18 13:33:46 -08004587 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
4588 FLAG_max_old_space_size * MB,
4589 FLAG_max_executable_size * MB);
Steve Blocka7e24c12009-10-30 11:49:00 +00004590}
4591
4592
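// Fills the HeapStats record with the current size and capacity of every
// space. With take_snapshot set it also walks the heap and tallies object
// counts and sizes per instance type.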
Ben Murdochbb769b22010-08-11 14:56:33 +01004593void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Iain Merrick75681382010-08-19 15:07:18 +01004594 *stats->start_marker = HeapStats::kStartMarker;
4595 *stats->end_marker = HeapStats::kEndMarker;
Ben Murdochf87a2032010-10-22 12:50:53 +01004596 *stats->new_space_size = new_space_.SizeAsInt();
4597 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Steve Blockd0582a62009-12-15 09:54:21 +00004598 *stats->old_pointer_space_size = old_pointer_space_->Size();
4599 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4600 *stats->old_data_space_size = old_data_space_->Size();
4601 *stats->old_data_space_capacity = old_data_space_->Capacity();
4602 *stats->code_space_size = code_space_->Size();
4603 *stats->code_space_capacity = code_space_->Capacity();
4604 *stats->map_space_size = map_space_->Size();
4605 *stats->map_space_capacity = map_space_->Capacity();
4606 *stats->cell_space_size = cell_space_->Size();
4607 *stats->cell_space_capacity = cell_space_->Capacity();
4608 *stats->lo_space_size = lo_space_->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01004609 isolate_->global_handles()->RecordStats(stats);
4610 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
Ben Murdochbb769b22010-08-11 14:56:33 +01004611 *stats->memory_allocator_capacity =
Steve Block44f0eee2011-05-26 01:26:41 +01004612 isolate()->memory_allocator()->Size() +
4613 isolate()->memory_allocator()->Available();
Iain Merrick75681382010-08-19 15:07:18 +01004614 *stats->os_error = OS::GetLastError();
Ben Murdochbb769b22010-08-11 14:56:33 +01004616 if (take_snapshot) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01004617 HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
Ben Murdochbb769b22010-08-11 14:56:33 +01004618 for (HeapObject* obj = iterator.next();
4619 obj != NULL;
4620 obj = iterator.next()) {
Ben Murdochbb769b22010-08-11 14:56:33 +01004621 InstanceType type = obj->map()->instance_type();
4622 ASSERT(0 <= type && type <= LAST_TYPE);
4623 stats->objects_per_type[type]++;
4624 stats->size_per_type[type] += obj->Size();
4625 }
4626 }
Steve Blockd0582a62009-12-15 09:54:21 +00004627}
4628
4629
Ben Murdochf87a2032010-10-22 12:50:53 +01004630intptr_t Heap::PromotedSpaceSize() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004631 return old_pointer_space_->Size()
4632 + old_data_space_->Size()
4633 + code_space_->Size()
4634 + map_space_->Size()
4635 + cell_space_->Size()
4636 + lo_space_->Size();
4637}
4638
4639
4640int Heap::PromotedExternalMemorySize() {
4641 if (amount_of_external_allocated_memory_
4642 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4643 return amount_of_external_allocated_memory_
4644 - amount_of_external_allocated_memory_at_last_global_gc_;
4645}
4646
Steve Block44f0eee2011-05-26 01:26:41 +01004647#ifdef DEBUG
4648
4649// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
4650static const int kMarkTag = 2;
4651
4652
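// Debug-only helper for finding a retaining path from the roots to a given
// object (or to any global object). It marks visited objects by tagging
// their map words and unmarks them again afterwards.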
4653class HeapDebugUtils {
4654 public:
4655 explicit HeapDebugUtils(Heap* heap)
4656 : search_for_any_global_(false),
4657 search_target_(NULL),
4658 found_target_(false),
4659 object_stack_(20),
4660 heap_(heap) {
4661 }
4662
4663 class MarkObjectVisitor : public ObjectVisitor {
4664 public:
4665 explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4666
4667 void VisitPointers(Object** start, Object** end) {
4668 // Copy all HeapObject pointers in [start, end)
4669 for (Object** p = start; p < end; p++) {
4670 if ((*p)->IsHeapObject())
4671 utils_->MarkObjectRecursively(p);
4672 }
4673 }
4674
4675 HeapDebugUtils* utils_;
4676 };
4677
4678 void MarkObjectRecursively(Object** p) {
4679 if (!(*p)->IsHeapObject()) return;
4680
4681 HeapObject* obj = HeapObject::cast(*p);
4682
4683 Object* map = obj->map();
4684
4685 if (!map->IsHeapObject()) return; // visited before
4686
4687 if (found_target_) return; // stop if target found
4688 object_stack_.Add(obj);
4689 if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
4690 (!search_for_any_global_ && (obj == search_target_))) {
4691 found_target_ = true;
4692 return;
4693 }
4694
4695 // not visited yet
4696 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
4697
4698 Address map_addr = map_p->address();
4699
4700 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
4701
4702 MarkObjectRecursively(&map);
4703
4704 MarkObjectVisitor mark_visitor(this);
4705
4706 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
4707 &mark_visitor);
4708
4709 if (!found_target_) // don't pop if found the target
4710 object_stack_.RemoveLast();
4711 }
4712
4713
4714 class UnmarkObjectVisitor : public ObjectVisitor {
4715 public:
4716 explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4717
4718 void VisitPointers(Object** start, Object** end) {
4719 // Copy all HeapObject pointers in [start, end)
4720 for (Object** p = start; p < end; p++) {
4721 if ((*p)->IsHeapObject())
4722 utils_->UnmarkObjectRecursively(p);
4723 }
4724 }
4725
4726 HeapDebugUtils* utils_;
4727 };
4728
4729
4730 void UnmarkObjectRecursively(Object** p) {
4731 if (!(*p)->IsHeapObject()) return;
4732
4733 HeapObject* obj = HeapObject::cast(*p);
4734
4735 Object* map = obj->map();
4736
4737 if (map->IsHeapObject()) return; // unmarked already
4738
4739 Address map_addr = reinterpret_cast<Address>(map);
4740
4741 map_addr -= kMarkTag;
4742
4743 ASSERT_TAG_ALIGNED(map_addr);
4744
4745 HeapObject* map_p = HeapObject::FromAddress(map_addr);
4746
4747 obj->set_map(reinterpret_cast<Map*>(map_p));
4748
4749 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
4750
4751 UnmarkObjectVisitor unmark_visitor(this);
4752
4753 obj->IterateBody(Map::cast(map_p)->instance_type(),
4754 obj->SizeFromMap(Map::cast(map_p)),
4755 &unmark_visitor);
4756 }
4757
4758
4759 void MarkRootObjectRecursively(Object** root) {
4760 if (search_for_any_global_) {
4761 ASSERT(search_target_ == NULL);
4762 } else {
4763 ASSERT(search_target_->IsHeapObject());
4764 }
4765 found_target_ = false;
4766 object_stack_.Clear();
4767
4768 MarkObjectRecursively(root);
4769 UnmarkObjectRecursively(root);
4770
4771 if (found_target_) {
4772 PrintF("=====================================\n");
4773 PrintF("==== Path to object ====\n");
4774 PrintF("=====================================\n\n");
4775
4776 ASSERT(!object_stack_.is_empty());
4777 for (int i = 0; i < object_stack_.length(); i++) {
4778 if (i > 0) PrintF("\n |\n |\n V\n\n");
4779 Object* obj = object_stack_[i];
4780 obj->Print();
4781 }
4782 PrintF("=====================================\n");
4783 }
4784 }
4785
4786 // Helper class for visiting HeapObjects recursively.
4787 class MarkRootVisitor: public ObjectVisitor {
4788 public:
4789 explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4790
4791 void VisitPointers(Object** start, Object** end) {
4792 // Visit all HeapObject pointers in [start, end)
4793 for (Object** p = start; p < end; p++) {
4794 if ((*p)->IsHeapObject())
4795 utils_->MarkRootObjectRecursively(p);
4796 }
4797 }
4798
4799 HeapDebugUtils* utils_;
4800 };
4801
4802 bool search_for_any_global_;
4803 Object* search_target_;
4804 bool found_target_;
4805 List<Object*> object_stack_;
4806 Heap* heap_;
4807
4808 friend class Heap;
4809};
4810
4811#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00004812
4813bool Heap::Setup(bool create_heap_objects) {
Steve Block44f0eee2011-05-26 01:26:41 +01004814#ifdef DEBUG
4815 debug_utils_ = new HeapDebugUtils(this);
4816#endif
4817
Steve Blocka7e24c12009-10-30 11:49:00 +00004818 // Initialize heap spaces and initial maps and objects. Whenever something
4819 // goes wrong, just return false. The caller should check the results and
4820 // call Heap::TearDown() to release allocated memory.
4821 //
  // If the heap is not yet configured (e.g., through the API), configure it.
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
  if (!configured_) {
    if (!ConfigureHeapDefault()) return false;
  }

  gc_initializer_mutex->Lock();
  static bool initialized_gc = false;
  if (!initialized_gc) {
    initialized_gc = true;
    InitializeScavengingVisitorsTables();
    NewSpaceScavenger::Initialize();
    MarkCompactCollector::Initialize();
  }
  gc_initializer_mutex->Unlock();

  MarkMapPointersAsEncoded(false);

  // Setup memory allocator and reserve a chunk of memory for new
  // space. The chunk is double the size of the requested reserved
  // new space size to ensure that we can find a pair of semispaces that
  // are contiguous and aligned to their size.
  if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
    return false;
  void* chunk =
      isolate_->memory_allocator()->ReserveInitialChunk(
          4 * reserved_semispace_size_);
  if (chunk == NULL) return false;

  // Align the pair of semispaces to their size, which must be a power
  // of 2.
  Address new_space_start =
      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
    return false;
  }

  // Initialize old pointer space.
  old_pointer_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_POINTER_SPACE,
                   NOT_EXECUTABLE);
  if (old_pointer_space_ == NULL) return false;
  if (!old_pointer_space_->Setup(NULL, 0)) return false;

  // Initialize old data space.
  old_data_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_DATA_SPACE,
                   NOT_EXECUTABLE);
  if (old_data_space_ == NULL) return false;
  if (!old_data_space_->Setup(NULL, 0)) return false;

  // Initialize the code space, set its maximum capacity to the old
  // generation size. It needs executable memory.
  // On 64-bit platform(s), we put all code objects in a 2 GB range of
  // virtual address space, so that they can call each other with near calls.
  if (code_range_size_ > 0) {
    if (!isolate_->code_range()->Setup(code_range_size_)) {
      return false;
    }
  }

  code_space_ =
      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
  if (code_space_ == NULL) return false;
  if (!code_space_->Setup(NULL, 0)) return false;

  // Initialize map space.
  map_space_ = new MapSpace(this, FLAG_use_big_map_space
      ? max_old_generation_size_
      : MapSpace::kMaxMapPageIndex * Page::kPageSize,
      FLAG_max_map_space_pages,
      MAP_SPACE);
  if (map_space_ == NULL) return false;
  if (!map_space_->Setup(NULL, 0)) return false;

  // Initialize global property cell space.
  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
  if (cell_space_ == NULL) return false;
  if (!cell_space_->Setup(NULL, 0)) return false;

  // The large object code space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(this, LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->Setup()) return false;

  if (create_heap_objects) {
    // Create initial maps.
    if (!CreateInitialMaps()) return false;
    if (!CreateApiObjects()) return false;

    // Create initial objects.
    if (!CreateInitialObjects()) return false;

    global_contexts_list_ = undefined_value();
  }

  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
  LOG(isolate_, IntPtrTEvent("heap-available", Available()));

#ifdef ENABLE_LOGGING_AND_PROFILING
  // This should be called only after initial objects have been created.
  isolate_->producer_heap_profile()->Setup();
#endif

  return true;
}


void Heap::SetStackLimits() {
  ASSERT(isolate_ != NULL);
  ASSERT(isolate_ == isolate());
  // On 64 bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore it.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}


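// Releases all spaces and auxiliary data owned by the heap. When
// --print-cumulative-gc-stat is enabled, the accumulated GC statistics are
// printed first, before any state is torn down.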
void Heap::TearDown() {
  if (FLAG_print_cumulative_gc_stat) {
    PrintF("\n\n");
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("mark_compact_count=%d ", mc_count_);
    PrintF("max_gc_pause=%d ", get_max_gc_pause());
    PrintF("min_in_mutator=%d ", get_min_in_mutator());
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
           get_max_alive_after_gc());
    PrintF("\n\n");
  }

  isolate_->global_handles()->TearDown();

  external_string_table_.TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  isolate_->memory_allocator()->TearDown();

#ifdef DEBUG
  delete debug_utils_;
  debug_utils_ = NULL;
#endif
}


void Heap::Shrink() {
  // Try to shrink all paged spaces.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->Shrink();
}


#ifdef ENABLE_HEAP_PROTECTION

void Heap::Protect() {
  if (HasBeenSetup()) {
    AllSpaces spaces;
    for (Space* space = spaces.next(); space != NULL; space = spaces.next())
      space->Protect();
  }
}


void Heap::Unprotect() {
  if (HasBeenSetup()) {
    AllSpaces spaces;
    for (Space* space = spaces.next(); space != NULL; space = spaces.next())
      space->Unprotect();
  }
}

#endif


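// The four functions below manage the lists of user-supplied GC prologue and
// epilogue callbacks. Each callback is stored together with the GCType mask
// it was registered for; removing a callback that was never added hits
// UNREACHABLE().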
void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}


void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}


void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG

class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n",
             reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};

void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

#endif


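// The iterators below hand out each space exactly once, in a fixed order,
// and return NULL once all spaces of the requested kind have been visited.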
Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return HEAP->new_space();
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    case LO_SPACE:
      return HEAP->lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    default:
      return NULL;
  }
}


SpaceIterator::SpaceIterator()
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(NULL) {
}


SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(size_func) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space.
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
      break;
  }

  // Return the newly allocated iterator.
  ASSERT(iterator_ != NULL);
  return iterator_;
}


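// HeapObjectsFilter and its two subclasses let HeapIterator skip objects the
// caller is not interested in: FreeListNodesFilter pre-marks free-list
// nodes, while UnreachableObjectsFilter marks every object and then unmarks
// everything reachable from the roots, so that SkipObject() returns true
// exactly for the objects that should be skipped.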
class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


class FreeListNodesFilter : public HeapObjectsFilter {
 public:
  FreeListNodesFilter() {
    MarkFreeListNodes();
  }

  bool SkipObject(HeapObject* object) {
    if (object->IsMarked()) {
      object->ClearMark();
      return true;
    } else {
      return false;
    }
  }

 private:
  void MarkFreeListNodes() {
    Heap* heap = HEAP;
    heap->old_pointer_space()->MarkFreeListNodes();
    heap->old_data_space()->MarkFreeListNodes();
    MarkCodeSpaceFreeListNodes(heap);
    heap->map_space()->MarkFreeListNodes();
    heap->cell_space()->MarkFreeListNodes();
  }

  void MarkCodeSpaceFreeListNodes(Heap* heap) {
    // For code space, using FreeListNode::IsFreeListNode is OK.
    HeapObjectIterator iter(heap->code_space());
    for (HeapObject* obj = iter.next_object();
         obj != NULL;
         obj = iter.next_object()) {
      if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
    }
  }

  AssertNoAllocation no_alloc;
};


class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  UnreachableObjectsFilter() {
    MarkUnreachableObjects();
  }

  bool SkipObject(HeapObject* object) {
    if (object->IsMarked()) {
      object->ClearMark();
      return true;
    } else {
      return false;
    }
  }

 private:
  class UnmarkingVisitor : public ObjectVisitor {
   public:
    UnmarkingVisitor() : list_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        if (obj->IsMarked()) {
          obj->ClearMark();
          list_.Add(obj);
        }
      }
    }

    bool can_process() { return !list_.is_empty(); }

    void ProcessNext() {
      HeapObject* obj = list_.RemoveLast();
      obj->Iterate(this);
    }

   private:
    List<HeapObject*> list_;
  };

  void MarkUnreachableObjects() {
    HeapIterator iterator;
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      obj->SetMark();
    }
    UnmarkingVisitor visitor;
    HEAP->IterateRoots(&visitor, VISIT_ALL);
    while (visitor.can_process())
      visitor.ProcessNext();
  }

  AssertNoAllocation no_alloc;
};


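// HeapIterator walks every object in every space, optionally skipping
// free-list nodes or unreachable objects. A minimal usage sketch (not code
// from this file):
//
//   HeapIterator it(HeapIterator::kFilterFreeListNodes);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... inspect obj ...
//   }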
HeapIterator::HeapIterator()
    : filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}


HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
    : filtering_(filtering),
      filter_(NULL) {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
      new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
  switch (filtering_) {
    case kFilterFreeListNodes:
      filter_ = new FreeListNodesFilter;
      break;
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter;
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
#endif
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}


HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}


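// PathTracer (compiled in DEBUG and LIVE_OBJECT_LIST builds) reuses the
// map-tagging trick used by HeapDebugUtils: MarkRecursively records the
// current path on object_stack_ and tags visited objects by adding kMarkTag
// to their map pointers; UnmarkRecursively restores the maps afterwards.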
#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)

Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);

class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end)
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  object_stack_.Clear();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target_in_trace_) return;  // stop if target found
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_global_context = obj->IsGlobalContext();

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Scan the object body.
  if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Context's properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map_p->instance_type(),
                     obj->SizeFromMap(map_p),
                     mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(&map, mark_visitor);

  if (!found_target_in_trace_)  // don't pop if found the target
    object_stack_.RemoveLast();
}


void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}


void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("==== Path to object ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n |\n |\n V\n\n");
      Object* obj = object_stack_[i];
#ifdef OBJECT_PRINT
      obj->Print();
#else
      obj->ShortPrint();
#endif
    }
    PrintF("=====================================\n");
  }
}
#endif  // DEBUG || LIVE_OBJECT_LIST


#ifdef DEBUG
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
#endif


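// Sums the wasted bytes and the bytes sitting on free lists across all old
// spaces; used by GCTracer to report hole sizes before and after a
// collection.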
static intptr_t CountTotalHolesSize() {
  intptr_t holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->AvailableFree();
  }
  return holes_size;
}


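// GCTracer measures a single collection. The constructor snapshots the heap
// size, the time spent in the mutator since the last GC and the pre-GC hole
// size; the destructor prints either the one-line --trace-gc summary or the
// name=value form used by --trace-gc-nvp, and updates the cumulative
// statistics reported by --print-cumulative-gc-stat.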
GCTracer::GCTracer(Heap* heap)
    : start_time_(0.0),
      start_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      is_compacting_(false),
      marked_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0),
      heap_(heap) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
  previous_marked_count_ =
      heap_->mark_compact_collector_.previous_marked_count();
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = heap_->SizeOfObjects();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();

  allocated_since_last_gc_ =
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;

  if (heap_->last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
  }
}


GCTracer::~GCTracer() {
  // Printf ONE line iff flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   static_cast<int>(spent_in_mutator_));
    }
  }

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    PrintF("%s %.1f -> %.1f MB, ",
           CollectorString(),
           static_cast<double>(start_size_) / MB,
           SizeOfHeapObjects());

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%d ms.\n", time);
  } else {
    PrintF("pause=%d ", time);
    PrintF("mutator=%d ",
           static_cast<int>(spent_in_mutator_));

    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("%s",
               heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
    PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);

    PrintF("\n");
  }

#if defined(ENABLE_LOGGING_AND_PROFILING)
  heap_->PrintShortHeapStatistics();
#endif
}


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
                                                           : "Mark-sweep";
  }
  return "Unknown GC";
}


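// KeyedLookupCache maps a (map, property name) pair to the offset of an
// in-object field. Hash() mixes the map pointer with the name's hash, and
// Update() only stores names that already exist as symbols.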
int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = Hash(map, name);
  Key& key = keys_[index];
  if ((key.map == map) && key.name->Equals(name)) {
    return field_offsets_[index];
  }
  return kNotFound;
}


void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (HEAP->LookupSymbolIfExists(name, &symbol)) {
    int index = Hash(map, symbol);
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


TranscendentalCache::SubCache::SubCache(Type t)
    : type_(t),
      isolate_(Isolate::Current()) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


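// Drops table entries that the GC has cleared to null and moves strings that
// have been promoted out of new space onto the old-space list.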
void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  Verify();
}


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


} }  // namespace v8::internal