// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scanner-base.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif


namespace v8 {
namespace internal {


String* Heap::hidden_symbol_;
Object* Heap::roots_[Heap::kRootListLength];
Object* Heap::global_contexts_list_;


NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;

static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;

intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;

int Heap::old_gen_exhausted_ = false;

int Heap::amount_of_external_allocated_memory_ = 0;
int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;

// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
static const int default_max_semispace_size_ = 2*MB;
intptr_t Heap::max_old_generation_size_ = 192*MB;
int Heap::initial_semispace_size_ = 128*KB;
intptr_t Heap::code_range_size_ = 0;
intptr_t Heap::max_executable_size_ = max_old_generation_size_;
#elif defined(V8_TARGET_ARCH_X64)
static const int default_max_semispace_size_ = 16*MB;
intptr_t Heap::max_old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
intptr_t Heap::code_range_size_ = 512*MB;
intptr_t Heap::max_executable_size_ = 256*MB;
#else
static const int default_max_semispace_size_ = 8*MB;
intptr_t Heap::max_old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
intptr_t Heap::code_range_size_ = 0;
intptr_t Heap::max_executable_size_ = 128*MB;
#endif
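
// Note: the constants above are compile-time defaults only; the embedder can
// still request different sizes when the heap is configured (see
// ConfigureHeap and reserved_semispace_size_ below).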

// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
int Heap::max_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#else
int Heap::max_semispace_size_ = default_max_semispace_size_;
#endif

// The snapshot semispace size will be the default semispace size if
// snapshotting is used and will be the requested semispace size as
// set up by ConfigureHeap otherwise.
int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;

List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;

GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;
HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.

// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
int Heap::survived_since_last_expansion_ = 0;
intptr_t Heap::external_allocation_limit_ = 0;

Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

int Heap::mc_count_ = 0;
int Heap::ms_count_ = 0;
int Heap::gc_count_ = 0;

GCTracer* Heap::tracer_ = NULL;

int Heap::unflattened_strings_length_ = 0;

int Heap::always_allocate_scope_depth_ = 0;
int Heap::linear_allocation_scope_depth_ = 0;
int Heap::contexts_disposed_ = 0;

int Heap::young_survivors_after_last_gc_ = 0;
int Heap::high_survival_rate_period_length_ = 0;
double Heap::survival_rate_ = 0;
Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;

#ifdef DEBUG
bool Heap::allocation_allowed_ = true;

int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif  // DEBUG

intptr_t GCTracer::alive_after_last_gc_ = 0;
double GCTracer::last_gc_end_timestamp_ = 0.0;
int GCTracer::max_gc_pause_ = 0;
intptr_t GCTracer::max_alive_after_gc_ = 0;
int GCTracer::min_in_mutator_ = kMaxInt;

intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}

intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return MemoryAllocator::SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}

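// Object sizing that remains safe while a mark-compact collection is in
// progress: at that point an object's map word may carry mark/overflow bits
// (first variant) or an encoded map pointer (second variant), so the size is
// recovered without assuming a clean map word.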
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!Heap::InNewSpace(object));  // Code only works for old objects.
  ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}


int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!Heap::InNewSpace(object));  // Code only works for old objects.
  ASSERT(MarkCompactCollector::are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(Heap::map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    Counters::gc_compactor_caused_by_request.Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    Counters::gc_compactor_caused_by_promoted_data.Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         MemoryAllocator::Size(),
         MemoryAllocator::Available());
  PrintF("New space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // As with the reporting before GC, we use some complicated logic to ensure
  // that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  TranscendentalCache::Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif

  LiveObjectList::GCPrologue();
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
  LiveObjectList::GCEpilogue();
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) GlobalHandles::Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  Counters::alive_after_last_gc.Set(static_cast<int>(SizeOfObjects()));

  Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
  Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  Debug::AfterGarbageCollection();
#endif
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  MarkCompactCollector::SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(true);

  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC.  Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callbacks invocations.
  // Therefore stop recollecting after several attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  MarkCompactCollector::SetForceCompaction(false);
}


bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? &Counters::gc_scavenger
        : &Counters::gc_compactor;
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
  if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
#endif

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer;
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  SymbolTableVerifier() { }
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  Heap::symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


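// Reserve the requested number of bytes in each space, collecting garbage in
// any space that cannot satisfy its reservation and retrying until every
// reservation succeeds. Callers that are about to perform a burst of
// allocations which must not fail part-way through (presumably the snapshot
// deserializer, among others) rely on this guarantee.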
void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation.  This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (Bootstrapper::IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (Bootstrapper::IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;
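  // Example: if 512 KB out of a 2 MB new space survived the last scavenge,
  // survival_rate works out to 25 (the rate is expressed as a percentage).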

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
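    // Worked example: with 30 MB of promoted data after this collection, the
    // next mark-compact is not triggered by promoted data until the old
    // generation has grown by another 10 MB (max(2 MB, 30/3)) and not by
    // old-space allocation until it has grown by 15 MB (max(8 MB, 30/2)),
    // unless the limits are doubled below.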

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that mutator is either building or modifying
      // a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  Counters::objs_since_last_young.Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        GlobalHandles::PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(ResourceEvent("markcompact", "begin"));

  MarkCompactCollector::Prepare(tracer);

  bool is_compacting = MarkCompactCollector::IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  MarkCompactCollector::CollectGarbage();

  LOG(ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  Counters::objs_since_last_full.Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  KeyedLookupCache::Clear();
  ContextSlotCache::Clear();
  DescriptorLookupCache::Clear();

  RuntimeProfiler::MarkCompactPrologue(is_compacting);

  CompilationCache::MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
 public:
  void Initialize(Address start_address) {
    front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
  }

  bool is_empty() { return front_ <= rear_; }

  void insert(HeapObject* target, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(target);
    *(--rear_) = size;
    // Assert no overflow into live objects.
    ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
  }

  void remove(HeapObject** target, int* size) {
    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    ASSERT(front_ >= rear_);
  }

 private:
  // The front of the queue is higher in memory than the rear.
  intptr_t* front_;
  intptr_t* rear_;
};


// Shared state read by the scavenge collector and set by ScavengeObject.
static PromotionQueue promotion_queue;


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(Heap::code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(Heap::old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  Page::FlipMeaningOfInvalidatedWatermarkFlag();
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark. This guarantees that dirty regions iteration will use a
  // correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  DescriptorLookupCache::Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue.Initialize(new_space_.ToSpaceHigh());

  ScavengeVisitor scavenge_visitor;
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.  By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge objects reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  // Scavenge objects reachable from the runtime-profiler sampler
  // window directly.
  Object** sampler_window_address = RuntimeProfiler::SamplerWindowAddress();
  int sampler_window_size = RuntimeProfiler::SamplerWindowSize();
  scavenge_visitor.VisitPointers(
      sampler_window_address,
      sampler_window_address + sampler_window_size);

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  LiveObjectList::UpdateReferencesForScavengeGC();

  ASSERT(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


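// Walk the external strings recorded as living in new space: entries whose
// string was finalized (the updater returned NULL) are dropped, strings that
// are still in new space stay in this list, and promoted strings move to the
// old-space string list.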
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  ExternalStringTable::Verify();

  if (ExternalStringTable::new_space_strings_.is_empty()) return;

  Object** start = &ExternalStringTable::new_space_strings_[0];
  Object** end = start + ExternalStringTable::new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(Heap::InFromSpace(*p));
    String* target = updater_func(p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (Heap::InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      ExternalStringTable::AddOldString(target);
    }
  }

  ASSERT(last <= end);
  ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
}


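// Prune the weak list of optimized functions hanging off a context: functions
// the retainer keeps are relinked through their next_function_link field,
// everything else is dropped. Returns the new head of the list, or undefined
// if no function was retained.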
static Object* ProcessFunctionWeakReferences(Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* head = Heap::undefined_value();
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (!candidate->IsUndefined()) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head->IsUndefined()) {
        // First element in the list.
        head = candidate_function;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(candidate_function);
      }
      // Retained function is new tail.
      tail = candidate_function;
    }
    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(Heap::undefined_value());
  }

  return head;
}


void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (!candidate->IsUndefined()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head->IsUndefined()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      tail = candidate_context;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer);
      candidate_context->set_unchecked(Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
    }
    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  Heap::global_contexts_list_ = head;
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue.remove(&target, &size);

      // The promoted object might already have been partially visited
      // during dirty regions iteration. Thus we search specifically
      // for pointers to the from-semispace instead of looking for pointers
      // to new space.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001252 ASSERT(!target->IsMap());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001253 IterateAndMarkPointersToFromSpace(target->address(),
1254 target->address() + size,
1255 &ScavengePointer);
Steve Blocka7e24c12009-10-30 11:49:00 +00001256 }
1257
1258 // Take another spin if there are now unswept objects in new space
1259 // (there are currently no more unswept promoted objects).
1260 } while (new_space_front < new_space_.top());
1261
Leon Clarkee46be812010-01-19 14:06:41 +00001262 return new_space_front;
Steve Blocka7e24c12009-10-30 11:49:00 +00001263}
1264
1265
Iain Merrick75681382010-08-19 15:07:18 +01001266class ScavengingVisitor : public StaticVisitorBase {
1267 public:
1268 static void Initialize() {
1269 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1270 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1271 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1272 table_.Register(kVisitByteArray, &EvacuateByteArray);
1273 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
Ben Murdochf87a2032010-10-22 12:50:53 +01001274 table_.Register(kVisitGlobalContext,
1275 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1276 VisitSpecialized<Context::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001277
1278 typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;
1279
1280 table_.Register(kVisitConsString,
1281 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1282 VisitSpecialized<ConsString::kSize>);
1283
1284 table_.Register(kVisitSharedFunctionInfo,
1285 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1286 VisitSpecialized<SharedFunctionInfo::kSize>);
1287
1288 table_.Register(kVisitJSFunction,
1289 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1290 VisitSpecialized<JSFunction::kSize>);
1291
1292 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1293 kVisitDataObject,
1294 kVisitDataObjectGeneric>();
1295
1296 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1297 kVisitJSObject,
1298 kVisitJSObjectGeneric>();
1299
1300 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1301 kVisitStruct,
1302 kVisitStructGeneric>();
1303 }
1304
1305
1306 static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) {
1307 table_.GetVisitor(map)(map, slot, obj);
1308 }
1309
1310
1311 private:
1312 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1313 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1314
Steve Blocka7e24c12009-10-30 11:49:00 +00001315#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Iain Merrick75681382010-08-19 15:07:18 +01001316 static void RecordCopiedObject(HeapObject* obj) {
1317 bool should_record = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001318#ifdef DEBUG
Iain Merrick75681382010-08-19 15:07:18 +01001319 should_record = FLAG_heap_stats;
Steve Blocka7e24c12009-10-30 11:49:00 +00001320#endif
1321#ifdef ENABLE_LOGGING_AND_PROFILING
Iain Merrick75681382010-08-19 15:07:18 +01001322 should_record = should_record || FLAG_log_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00001323#endif
Iain Merrick75681382010-08-19 15:07:18 +01001324 if (should_record) {
1325 if (Heap::new_space()->Contains(obj)) {
1326 Heap::new_space()->RecordAllocation(obj);
1327 } else {
1328 Heap::new_space()->RecordPromotion(obj);
1329 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001330 }
1331 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001332#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1333
Iain Merrick75681382010-08-19 15:07:18 +01001334 // Helper function used during scavenging to copy a source object to a
1335 // newly allocated target object and update the forwarding pointer in the
1336 // source object. Returns the target object.
1337 INLINE(static HeapObject* MigrateObject(HeapObject* source,
1338 HeapObject* target,
1339 int size)) {
1340 // Copy the content of source to target.
1341 Heap::CopyBlock(target->address(), source->address(), size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001342
Iain Merrick75681382010-08-19 15:07:18 +01001343 // Set the forwarding address.
1344 source->set_map_word(MapWord::FromForwardingAddress(target));
Steve Blocka7e24c12009-10-30 11:49:00 +00001345
1346#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Iain Merrick75681382010-08-19 15:07:18 +01001347 // Update NewSpace stats if necessary.
1348 RecordCopiedObject(target);
Steve Blocka7e24c12009-10-30 11:49:00 +00001349#endif
Iain Merrick75681382010-08-19 15:07:18 +01001350 HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001351#if defined(ENABLE_LOGGING_AND_PROFILING)
1352 if (Logger::is_logging() || CpuProfiler::is_profiling()) {
1353 if (target->IsJSFunction()) {
1354 PROFILE(FunctionMoveEvent(source->address(), target->address()));
Ben Murdochf87a2032010-10-22 12:50:53 +01001355 PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target)));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001356 }
1357 }
1358#endif
Iain Merrick75681382010-08-19 15:07:18 +01001359 return target;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001360 }
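// A sketch of how the forwarding pointer installed above is consumed later in
// the same scavenge (this mirrors ScavengeObject/ScavengeObjectSlow below; the
// local names are illustrative only):
//
//   MapWord word = source->map_word();
//   if (word.IsForwardingAddress()) {
//     *slot = word.ToForwardingAddress();   // already copied; just fix slot
//   } else {
//     ScavengingVisitor::Scavenge(word.ToMap(), slot, source);  // copy now
//   }
//
// Overwriting the map word is safe because the map was read before the copy
// and the from-space original is dead once the scavenge completes.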
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001361
1362
Iain Merrick75681382010-08-19 15:07:18 +01001363 template<ObjectContents object_contents, SizeRestriction size_restriction>
1364 static inline void EvacuateObject(Map* map,
1365 HeapObject** slot,
1366 HeapObject* object,
1367 int object_size) {
1368 ASSERT((size_restriction != SMALL) ||
1369 (object_size <= Page::kMaxHeapObjectSize));
1370 ASSERT(object->Size() == object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001371
Iain Merrick75681382010-08-19 15:07:18 +01001372 if (Heap::ShouldBePromoted(object->address(), object_size)) {
John Reck59135872010-11-02 12:39:01 -07001373 MaybeObject* maybe_result;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001374
Iain Merrick75681382010-08-19 15:07:18 +01001375 if ((size_restriction != SMALL) &&
1376 (object_size > Page::kMaxHeapObjectSize)) {
John Reck59135872010-11-02 12:39:01 -07001377 maybe_result = Heap::lo_space()->AllocateRawFixedArray(object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001378 } else {
Iain Merrick75681382010-08-19 15:07:18 +01001379 if (object_contents == DATA_OBJECT) {
John Reck59135872010-11-02 12:39:01 -07001380 maybe_result = Heap::old_data_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001381 } else {
John Reck59135872010-11-02 12:39:01 -07001382 maybe_result = Heap::old_pointer_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001383 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001384 }
1385
John Reck59135872010-11-02 12:39:01 -07001386 Object* result = NULL; // Initialization to please compiler.
1387 if (maybe_result->ToObject(&result)) {
Iain Merrick75681382010-08-19 15:07:18 +01001388 HeapObject* target = HeapObject::cast(result);
1389 *slot = MigrateObject(object, target, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001390
Iain Merrick75681382010-08-19 15:07:18 +01001391 if (object_contents == POINTER_OBJECT) {
1392 promotion_queue.insert(target, object_size);
1393 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001394
Iain Merrick75681382010-08-19 15:07:18 +01001395 Heap::tracer()->increment_promoted_objects_size(object_size);
1396 return;
1397 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001398 }
John Reck59135872010-11-02 12:39:01 -07001399 Object* result =
1400 Heap::new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
Iain Merrick75681382010-08-19 15:07:18 +01001401 *slot = MigrateObject(object, HeapObject::cast(result), object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001402 return;
1403 }
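// Promotion policy in brief: when ShouldBePromoted() decides the object should
// leave new space (typically because it already survived a scavenge), it is
// copied into old space -- the data space for pointer-free objects, the
// pointer space otherwise, and the large-object space when it exceeds
// Page::kMaxHeapObjectSize -- and pointer objects are queued for re-scanning.
// If that allocation fails, or the object is not promoted, it is copied within
// new space; that allocation is expected to succeed (hence ToObjectUnchecked)
// because the survivors of from-space always fit into the empty to-space.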
1404
Iain Merrick75681382010-08-19 15:07:18 +01001405
1406 static inline void EvacuateFixedArray(Map* map,
1407 HeapObject** slot,
1408 HeapObject* object) {
1409 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1410 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1411 slot,
1412 object,
1413 object_size);
1414 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001415
1416
Iain Merrick75681382010-08-19 15:07:18 +01001417 static inline void EvacuateByteArray(Map* map,
1418 HeapObject** slot,
1419 HeapObject* object) {
1420 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1421 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1422 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001423
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001424
Iain Merrick75681382010-08-19 15:07:18 +01001425 static inline void EvacuateSeqAsciiString(Map* map,
1426 HeapObject** slot,
1427 HeapObject* object) {
1428 int object_size = SeqAsciiString::cast(object)->
1429 SeqAsciiStringSize(map->instance_type());
1430 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1431 }
1432
1433
1434 static inline void EvacuateSeqTwoByteString(Map* map,
1435 HeapObject** slot,
1436 HeapObject* object) {
1437 int object_size = SeqTwoByteString::cast(object)->
1438 SeqTwoByteStringSize(map->instance_type());
1439 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1440 }
1441
1442
1443 static inline bool IsShortcutCandidate(int type) {
1444 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1445 }
1446
1447 static inline void EvacuateShortcutCandidate(Map* map,
1448 HeapObject** slot,
1449 HeapObject* object) {
1450 ASSERT(IsShortcutCandidate(map->instance_type()));
1451
1452 if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
1453 HeapObject* first =
1454 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1455
1456 *slot = first;
1457
1458 if (!Heap::InNewSpace(first)) {
1459 object->set_map_word(MapWord::FromForwardingAddress(first));
1460 return;
1461 }
1462
1463 MapWord first_word = first->map_word();
1464 if (first_word.IsForwardingAddress()) {
1465 HeapObject* target = first_word.ToForwardingAddress();
1466
1467 *slot = target;
1468 object->set_map_word(MapWord::FromForwardingAddress(target));
1469 return;
1470 }
1471
1472 Scavenge(first->map(), slot, first);
1473 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1474 return;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001475 }
Iain Merrick75681382010-08-19 15:07:18 +01001476
1477 int object_size = ConsString::kSize;
1478 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001479 }
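// Shortcutting in brief: a cons string whose second half is the empty string
// is just a wrapper around its first half, so instead of copying the wrapper
// the slot is redirected straight to the first string (scavenging it first if
// it still lives in from-space). The wrapper's map word is then set to a
// forwarding address so that any other slot referencing it gets redirected the
// same way. Only when the second half is non-empty is the ConsString copied as
// an ordinary small pointer object.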
1480
Iain Merrick75681382010-08-19 15:07:18 +01001481 template<ObjectContents object_contents>
1482 class ObjectEvacuationStrategy {
1483 public:
1484 template<int object_size>
1485 static inline void VisitSpecialized(Map* map,
1486 HeapObject** slot,
1487 HeapObject* object) {
1488 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1489 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001490
Iain Merrick75681382010-08-19 15:07:18 +01001491 static inline void Visit(Map* map,
1492 HeapObject** slot,
1493 HeapObject* object) {
1494 int object_size = map->instance_size();
1495 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1496 }
1497 };
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001498
Iain Merrick75681382010-08-19 15:07:18 +01001499 typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001500
Iain Merrick75681382010-08-19 15:07:18 +01001501 static VisitorDispatchTable<Callback> table_;
1502};
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001503
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001504
Iain Merrick75681382010-08-19 15:07:18 +01001505VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_;
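// Dispatch works through the map: AllocatePartialMap/AllocateMap below store a
// visitor id computed by StaticVisitorBase::GetVisitorId on every Map, and
// Scavenge() simply indexes table_ with it. This keeps the copy loop free of
// instance-type switches; supporting a new specialized evacuation routine only
// takes another Register call in Initialize().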
Steve Blocka7e24c12009-10-30 11:49:00 +00001506
1507
1508void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1509 ASSERT(InFromSpace(object));
1510 MapWord first_word = object->map_word();
1511 ASSERT(!first_word.IsForwardingAddress());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001512 Map* map = first_word.ToMap();
Iain Merrick75681382010-08-19 15:07:18 +01001513 ScavengingVisitor::Scavenge(map, p, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001514}
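// Only unforwarded objects reach this slow path (see the ASSERT above); the
// inlined ScavengeObject fast path is assumed to have handled objects whose
// map word is already a forwarding address by simply updating the slot.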
1515
1516
1517void Heap::ScavengePointer(HeapObject** p) {
1518 ScavengeObject(p, *p);
1519}
1520
1521
John Reck59135872010-11-02 12:39:01 -07001522MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1523 int instance_size) {
1524 Object* result;
1525 { MaybeObject* maybe_result = AllocateRawMap();
1526 if (!maybe_result->ToObject(&result)) return maybe_result;
1527 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001528
1529 // Map::cast cannot be used due to uninitialized map field.
1530 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1531 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1532 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001533 reinterpret_cast<Map*>(result)->
Iain Merrick75681382010-08-19 15:07:18 +01001534 set_visitor_id(
1535 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001536 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001537 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001538 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001539 reinterpret_cast<Map*>(result)->set_bit_field(0);
1540 reinterpret_cast<Map*>(result)->set_bit_field2(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001541 return result;
1542}
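// AllocatePartialMap exists only for bootstrapping: at this point neither the
// empty descriptor array nor the empty fixed array exist yet, so the fields
// that would normally reference them (instance_descriptors, code_cache,
// prototype, constructor) are left unset here and patched up later in
// CreateInitialMaps once those objects have been allocated.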
1543
1544
John Reck59135872010-11-02 12:39:01 -07001545MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1546 Object* result;
1547 { MaybeObject* maybe_result = AllocateRawMap();
1548 if (!maybe_result->ToObject(&result)) return maybe_result;
1549 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001550
1551 Map* map = reinterpret_cast<Map*>(result);
1552 map->set_map(meta_map());
1553 map->set_instance_type(instance_type);
Iain Merrick75681382010-08-19 15:07:18 +01001554 map->set_visitor_id(
1555 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001556 map->set_prototype(null_value());
1557 map->set_constructor(null_value());
1558 map->set_instance_size(instance_size);
1559 map->set_inobject_properties(0);
1560 map->set_pre_allocated_property_fields(0);
1561 map->set_instance_descriptors(empty_descriptor_array());
1562 map->set_code_cache(empty_fixed_array());
1563 map->set_unused_property_fields(0);
1564 map->set_bit_field(0);
Steve Block8defd9f2010-07-08 12:39:36 +01001565 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
Leon Clarkee46be812010-01-19 14:06:41 +00001566
1567 // If the map object is aligned, fill the padding area with Smi 0 objects.
1568 if (Map::kPadStart < Map::kSize) {
1569 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1570 0,
1571 Map::kSize - Map::kPadStart);
1572 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001573 return map;
1574}
1575
1576
John Reck59135872010-11-02 12:39:01 -07001577MaybeObject* Heap::AllocateCodeCache() {
1578 Object* result;
1579 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1580 if (!maybe_result->ToObject(&result)) return maybe_result;
1581 }
Steve Block6ded16b2010-05-10 14:33:55 +01001582 CodeCache* code_cache = CodeCache::cast(result);
1583 code_cache->set_default_cache(empty_fixed_array());
1584 code_cache->set_normal_type_cache(undefined_value());
1585 return code_cache;
1586}
1587
1588
Steve Blocka7e24c12009-10-30 11:49:00 +00001589const Heap::StringTypeTable Heap::string_type_table[] = {
1590#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1591 {type, size, k##camel_name##MapRootIndex},
1592 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1593#undef STRING_TYPE_ELEMENT
1594};
1595
1596
1597const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1598#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1599 {contents, k##name##RootIndex},
1600 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1601#undef CONSTANT_SYMBOL_ELEMENT
1602};
1603
1604
1605const Heap::StructTable Heap::struct_table[] = {
1606#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1607 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1608 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1609#undef STRUCT_TABLE_ELEMENT
1610};
1611
1612
1613bool Heap::CreateInitialMaps() {
John Reck59135872010-11-02 12:39:01 -07001614 Object* obj;
1615 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1616 if (!maybe_obj->ToObject(&obj)) return false;
1617 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001618 // Map::cast cannot be used due to uninitialized map field.
1619 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1620 set_meta_map(new_meta_map);
1621 new_meta_map->set_map(new_meta_map);
1622
John Reck59135872010-11-02 12:39:01 -07001623 { MaybeObject* maybe_obj =
1624 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1625 if (!maybe_obj->ToObject(&obj)) return false;
1626 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001627 set_fixed_array_map(Map::cast(obj));
1628
John Reck59135872010-11-02 12:39:01 -07001629 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1630 if (!maybe_obj->ToObject(&obj)) return false;
1631 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001632 set_oddball_map(Map::cast(obj));
1633
Steve Block6ded16b2010-05-10 14:33:55 +01001634 // Allocate the empty array.
John Reck59135872010-11-02 12:39:01 -07001635 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1636 if (!maybe_obj->ToObject(&obj)) return false;
1637 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001638 set_empty_fixed_array(FixedArray::cast(obj));
1639
John Reck59135872010-11-02 12:39:01 -07001640 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1641 if (!maybe_obj->ToObject(&obj)) return false;
1642 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001643 set_null_value(obj);
1644
1645 // Allocate the empty descriptor array.
John Reck59135872010-11-02 12:39:01 -07001646 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1647 if (!maybe_obj->ToObject(&obj)) return false;
1648 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001649 set_empty_descriptor_array(DescriptorArray::cast(obj));
1650
1651 // Fix the instance_descriptors for the existing maps.
1652 meta_map()->set_instance_descriptors(empty_descriptor_array());
1653 meta_map()->set_code_cache(empty_fixed_array());
1654
1655 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1656 fixed_array_map()->set_code_cache(empty_fixed_array());
1657
1658 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1659 oddball_map()->set_code_cache(empty_fixed_array());
1660
1661 // Fix prototype object for existing maps.
1662 meta_map()->set_prototype(null_value());
1663 meta_map()->set_constructor(null_value());
1664
1665 fixed_array_map()->set_prototype(null_value());
1666 fixed_array_map()->set_constructor(null_value());
1667
1668 oddball_map()->set_prototype(null_value());
1669 oddball_map()->set_constructor(null_value());
1670
John Reck59135872010-11-02 12:39:01 -07001671 { MaybeObject* maybe_obj =
1672 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1673 if (!maybe_obj->ToObject(&obj)) return false;
1674 }
Iain Merrick75681382010-08-19 15:07:18 +01001675 set_fixed_cow_array_map(Map::cast(obj));
1676 ASSERT(fixed_array_map() != fixed_cow_array_map());
1677
John Reck59135872010-11-02 12:39:01 -07001678 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1679 if (!maybe_obj->ToObject(&obj)) return false;
1680 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001681 set_heap_number_map(Map::cast(obj));
1682
John Reck59135872010-11-02 12:39:01 -07001683 { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1684 if (!maybe_obj->ToObject(&obj)) return false;
1685 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001686 set_proxy_map(Map::cast(obj));
1687
1688 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1689 const StringTypeTable& entry = string_type_table[i];
John Reck59135872010-11-02 12:39:01 -07001690 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1691 if (!maybe_obj->ToObject(&obj)) return false;
1692 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001693 roots_[entry.index] = Map::cast(obj);
1694 }
1695
John Reck59135872010-11-02 12:39:01 -07001696 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1697 if (!maybe_obj->ToObject(&obj)) return false;
1698 }
Steve Blockd0582a62009-12-15 09:54:21 +00001699 set_undetectable_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001700 Map::cast(obj)->set_is_undetectable();
1701
John Reck59135872010-11-02 12:39:01 -07001702 { MaybeObject* maybe_obj =
1703 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1704 if (!maybe_obj->ToObject(&obj)) return false;
1705 }
Steve Blockd0582a62009-12-15 09:54:21 +00001706 set_undetectable_ascii_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001707 Map::cast(obj)->set_is_undetectable();
1708
John Reck59135872010-11-02 12:39:01 -07001709 { MaybeObject* maybe_obj =
1710 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1711 if (!maybe_obj->ToObject(&obj)) return false;
1712 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001713 set_byte_array_map(Map::cast(obj));
1714
Ben Murdochb0fe1622011-05-05 13:52:32 +01001715 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1716 if (!maybe_obj->ToObject(&obj)) return false;
1717 }
1718 set_empty_byte_array(ByteArray::cast(obj));
1719
John Reck59135872010-11-02 12:39:01 -07001720 { MaybeObject* maybe_obj =
1721 AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
1722 if (!maybe_obj->ToObject(&obj)) return false;
1723 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001724 set_pixel_array_map(Map::cast(obj));
1725
John Reck59135872010-11-02 12:39:01 -07001726 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1727 ExternalArray::kAlignedSize);
1728 if (!maybe_obj->ToObject(&obj)) return false;
1729 }
Steve Block3ce2e202009-11-05 08:53:23 +00001730 set_external_byte_array_map(Map::cast(obj));
1731
John Reck59135872010-11-02 12:39:01 -07001732 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1733 ExternalArray::kAlignedSize);
1734 if (!maybe_obj->ToObject(&obj)) return false;
1735 }
Steve Block3ce2e202009-11-05 08:53:23 +00001736 set_external_unsigned_byte_array_map(Map::cast(obj));
1737
John Reck59135872010-11-02 12:39:01 -07001738 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1739 ExternalArray::kAlignedSize);
1740 if (!maybe_obj->ToObject(&obj)) return false;
1741 }
Steve Block3ce2e202009-11-05 08:53:23 +00001742 set_external_short_array_map(Map::cast(obj));
1743
John Reck59135872010-11-02 12:39:01 -07001744 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1745 ExternalArray::kAlignedSize);
1746 if (!maybe_obj->ToObject(&obj)) return false;
1747 }
Steve Block3ce2e202009-11-05 08:53:23 +00001748 set_external_unsigned_short_array_map(Map::cast(obj));
1749
John Reck59135872010-11-02 12:39:01 -07001750 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1751 ExternalArray::kAlignedSize);
1752 if (!maybe_obj->ToObject(&obj)) return false;
1753 }
Steve Block3ce2e202009-11-05 08:53:23 +00001754 set_external_int_array_map(Map::cast(obj));
1755
John Reck59135872010-11-02 12:39:01 -07001756 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1757 ExternalArray::kAlignedSize);
1758 if (!maybe_obj->ToObject(&obj)) return false;
1759 }
Steve Block3ce2e202009-11-05 08:53:23 +00001760 set_external_unsigned_int_array_map(Map::cast(obj));
1761
John Reck59135872010-11-02 12:39:01 -07001762 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1763 ExternalArray::kAlignedSize);
1764 if (!maybe_obj->ToObject(&obj)) return false;
1765 }
Steve Block3ce2e202009-11-05 08:53:23 +00001766 set_external_float_array_map(Map::cast(obj));
1767
John Reck59135872010-11-02 12:39:01 -07001768 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1769 if (!maybe_obj->ToObject(&obj)) return false;
1770 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001771 set_code_map(Map::cast(obj));
1772
John Reck59135872010-11-02 12:39:01 -07001773 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1774 JSGlobalPropertyCell::kSize);
1775 if (!maybe_obj->ToObject(&obj)) return false;
1776 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001777 set_global_property_cell_map(Map::cast(obj));
1778
John Reck59135872010-11-02 12:39:01 -07001779 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
1780 if (!maybe_obj->ToObject(&obj)) return false;
1781 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001782 set_one_pointer_filler_map(Map::cast(obj));
1783
John Reck59135872010-11-02 12:39:01 -07001784 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1785 if (!maybe_obj->ToObject(&obj)) return false;
1786 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001787 set_two_pointer_filler_map(Map::cast(obj));
1788
1789 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1790 const StructTable& entry = struct_table[i];
John Reck59135872010-11-02 12:39:01 -07001791 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1792 if (!maybe_obj->ToObject(&obj)) return false;
1793 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001794 roots_[entry.index] = Map::cast(obj);
1795 }
1796
John Reck59135872010-11-02 12:39:01 -07001797 { MaybeObject* maybe_obj =
1798 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1799 if (!maybe_obj->ToObject(&obj)) return false;
1800 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001801 set_hash_table_map(Map::cast(obj));
1802
John Reck59135872010-11-02 12:39:01 -07001803 { MaybeObject* maybe_obj =
1804 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1805 if (!maybe_obj->ToObject(&obj)) return false;
1806 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001807 set_context_map(Map::cast(obj));
1808
John Reck59135872010-11-02 12:39:01 -07001809 { MaybeObject* maybe_obj =
1810 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1811 if (!maybe_obj->ToObject(&obj)) return false;
1812 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001813 set_catch_context_map(Map::cast(obj));
1814
John Reck59135872010-11-02 12:39:01 -07001815 { MaybeObject* maybe_obj =
1816 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1817 if (!maybe_obj->ToObject(&obj)) return false;
1818 }
Ben Murdochf87a2032010-10-22 12:50:53 +01001819 Map* global_context_map = Map::cast(obj);
1820 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
1821 set_global_context_map(global_context_map);
Steve Blocka7e24c12009-10-30 11:49:00 +00001822
John Reck59135872010-11-02 12:39:01 -07001823 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1824 SharedFunctionInfo::kAlignedSize);
1825 if (!maybe_obj->ToObject(&obj)) return false;
1826 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001827 set_shared_function_info_map(Map::cast(obj));
1828
Steve Block1e0659c2011-05-24 12:43:12 +01001829 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
1830 JSMessageObject::kSize);
1831 if (!maybe_obj->ToObject(&obj)) return false;
1832 }
1833 set_message_object_map(Map::cast(obj));
1834
Steve Blocka7e24c12009-10-30 11:49:00 +00001835 ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
1836 return true;
1837}
1838
1839
John Reck59135872010-11-02 12:39:01 -07001840MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001841 // Statically ensure that it is safe to allocate heap numbers in paged
1842 // spaces.
1843 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1844 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1845
John Reck59135872010-11-02 12:39:01 -07001846 Object* result;
1847 { MaybeObject* maybe_result =
1848 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1849 if (!maybe_result->ToObject(&result)) return maybe_result;
1850 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001851
1852 HeapObject::cast(result)->set_map(heap_number_map());
1853 HeapNumber::cast(result)->set_value(value);
1854 return result;
1855}
1856
1857
John Reck59135872010-11-02 12:39:01 -07001858MaybeObject* Heap::AllocateHeapNumber(double value) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001859 // Use general version, if we're forced to always allocate.
1860 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1861
1862 // This version of AllocateHeapNumber is optimized for
1863 // allocation in new space.
1864 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1865 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
John Reck59135872010-11-02 12:39:01 -07001866 Object* result;
1867 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
1868 if (!maybe_result->ToObject(&result)) return maybe_result;
1869 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001870 HeapObject::cast(result)->set_map(heap_number_map());
1871 HeapNumber::cast(result)->set_value(value);
1872 return result;
1873}
1874
1875
John Reck59135872010-11-02 12:39:01 -07001876MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1877 Object* result;
1878 { MaybeObject* maybe_result = AllocateRawCell();
1879 if (!maybe_result->ToObject(&result)) return maybe_result;
1880 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001881 HeapObject::cast(result)->set_map(global_property_cell_map());
1882 JSGlobalPropertyCell::cast(result)->set_value(value);
1883 return result;
1884}
1885
1886
John Reck59135872010-11-02 12:39:01 -07001887MaybeObject* Heap::CreateOddball(const char* to_string,
1888 Object* to_number) {
1889 Object* result;
1890 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
1891 if (!maybe_result->ToObject(&result)) return maybe_result;
1892 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001893 return Oddball::cast(result)->Initialize(to_string, to_number);
1894}
1895
1896
1897bool Heap::CreateApiObjects() {
1898 Object* obj;
1899
John Reck59135872010-11-02 12:39:01 -07001900 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1901 if (!maybe_obj->ToObject(&obj)) return false;
1902 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001903 set_neander_map(Map::cast(obj));
1904
John Reck59135872010-11-02 12:39:01 -07001905 { MaybeObject* maybe_obj = Heap::AllocateJSObjectFromMap(neander_map());
1906 if (!maybe_obj->ToObject(&obj)) return false;
1907 }
1908 Object* elements;
1909 { MaybeObject* maybe_elements = AllocateFixedArray(2);
1910 if (!maybe_elements->ToObject(&elements)) return false;
1911 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001912 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1913 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1914 set_message_listeners(JSObject::cast(obj));
1915
1916 return true;
1917}
1918
1919
1920void Heap::CreateCEntryStub() {
1921 CEntryStub stub(1);
1922 set_c_entry_code(*stub.GetCode());
1923}
1924
1925
Steve Block6ded16b2010-05-10 14:33:55 +01001926#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001927void Heap::CreateRegExpCEntryStub() {
1928 RegExpCEntryStub stub;
1929 set_re_c_entry_code(*stub.GetCode());
1930}
1931#endif
1932
1933
Steve Blocka7e24c12009-10-30 11:49:00 +00001934void Heap::CreateJSEntryStub() {
1935 JSEntryStub stub;
1936 set_js_entry_code(*stub.GetCode());
1937}
1938
1939
1940void Heap::CreateJSConstructEntryStub() {
1941 JSConstructEntryStub stub;
1942 set_js_construct_entry_code(*stub.GetCode());
1943}
1944
1945
Steve Block1e0659c2011-05-24 12:43:12 +01001946#if V8_TARGET_ARCH_ARM
1947void Heap::CreateDirectCEntryStub() {
1948 DirectCEntryStub stub;
1949 set_direct_c_entry_code(*stub.GetCode());
1950}
1951#endif
1952
1953
Steve Blocka7e24c12009-10-30 11:49:00 +00001954void Heap::CreateFixedStubs() {
1955 // Here we create roots for fixed stubs. They are needed at GC
1956 // for cooking and uncooking (check out frames.cc).
1957 // This eliminates the need for a dictionary lookup in the
1958 // stub cache for these stubs.
1959 HandleScope scope;
1960 // gcc-4.4 has a problem generating correct code for the following snippet:
1961 // { CEntryStub stub;
1962 // c_entry_code_ = *stub.GetCode();
1963 // }
Leon Clarke4515c472010-02-03 11:58:03 +00001964 // { DebuggerStatementStub stub;
1965 // debugger_statement_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001966 // }
1967 // To work around the problem, use separate functions that are not inlined.
1968 Heap::CreateCEntryStub();
Steve Blocka7e24c12009-10-30 11:49:00 +00001969 Heap::CreateJSEntryStub();
1970 Heap::CreateJSConstructEntryStub();
Steve Block6ded16b2010-05-10 14:33:55 +01001971#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001972 Heap::CreateRegExpCEntryStub();
1973#endif
Steve Block1e0659c2011-05-24 12:43:12 +01001974#if V8_TARGET_ARCH_ARM
1975 Heap::CreateDirectCEntryStub();
1976#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00001977}
1978
1979
1980bool Heap::CreateInitialObjects() {
1981 Object* obj;
1982
1983 // The -0 value must be set before NumberFromDouble works.
John Reck59135872010-11-02 12:39:01 -07001984 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
1985 if (!maybe_obj->ToObject(&obj)) return false;
1986 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001987 set_minus_zero_value(obj);
1988 ASSERT(signbit(minus_zero_value()->Number()) != 0);
1989
John Reck59135872010-11-02 12:39:01 -07001990 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
1991 if (!maybe_obj->ToObject(&obj)) return false;
1992 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001993 set_nan_value(obj);
1994
John Reck59135872010-11-02 12:39:01 -07001995 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1996 if (!maybe_obj->ToObject(&obj)) return false;
1997 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001998 set_undefined_value(obj);
1999 ASSERT(!InNewSpace(undefined_value()));
2000
2001 // Allocate initial symbol table.
John Reck59135872010-11-02 12:39:01 -07002002 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2003 if (!maybe_obj->ToObject(&obj)) return false;
2004 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002005 // Don't use set_symbol_table() due to asserts.
2006 roots_[kSymbolTableRootIndex] = obj;
2007
2008 // Assign the print strings for oddballs after creating the symbol table.
John Reck59135872010-11-02 12:39:01 -07002009 Object* symbol;
2010 { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
2011 if (!maybe_symbol->ToObject(&symbol)) return false;
2012 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002013 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
2014 Oddball::cast(undefined_value())->set_to_number(nan_value());
2015
Steve Blocka7e24c12009-10-30 11:49:00 +00002016 // Finish initializing the null_value oddball allocated in CreateInitialMaps.
John Reck59135872010-11-02 12:39:01 -07002017 { MaybeObject* maybe_obj =
2018 Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
2019 if (!maybe_obj->ToObject(&obj)) return false;
2020 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002021
John Reck59135872010-11-02 12:39:01 -07002022 { MaybeObject* maybe_obj = CreateOddball("true", Smi::FromInt(1));
2023 if (!maybe_obj->ToObject(&obj)) return false;
2024 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002025 set_true_value(obj);
2026
John Reck59135872010-11-02 12:39:01 -07002027 { MaybeObject* maybe_obj = CreateOddball("false", Smi::FromInt(0));
2028 if (!maybe_obj->ToObject(&obj)) return false;
2029 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002030 set_false_value(obj);
2031
John Reck59135872010-11-02 12:39:01 -07002032 { MaybeObject* maybe_obj = CreateOddball("hole", Smi::FromInt(-1));
2033 if (!maybe_obj->ToObject(&obj)) return false;
2034 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002035 set_the_hole_value(obj);
2036
Ben Murdoch086aeea2011-05-13 15:57:08 +01002037 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2038 Smi::FromInt(-4));
2039 if (!maybe_obj->ToObject(&obj)) return false;
2040 }
2041 set_arguments_marker(obj);
2042
John Reck59135872010-11-02 12:39:01 -07002043 { MaybeObject* maybe_obj =
2044 CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
2045 if (!maybe_obj->ToObject(&obj)) return false;
2046 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002047 set_no_interceptor_result_sentinel(obj);
2048
John Reck59135872010-11-02 12:39:01 -07002049 { MaybeObject* maybe_obj =
2050 CreateOddball("termination_exception", Smi::FromInt(-3));
2051 if (!maybe_obj->ToObject(&obj)) return false;
2052 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002053 set_termination_exception(obj);
2054
2055 // Allocate the empty string.
John Reck59135872010-11-02 12:39:01 -07002056 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2057 if (!maybe_obj->ToObject(&obj)) return false;
2058 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002059 set_empty_string(String::cast(obj));
2060
2061 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
John Reck59135872010-11-02 12:39:01 -07002062 { MaybeObject* maybe_obj =
2063 LookupAsciiSymbol(constant_symbol_table[i].contents);
2064 if (!maybe_obj->ToObject(&obj)) return false;
2065 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002066 roots_[constant_symbol_table[i].index] = String::cast(obj);
2067 }
2068
2069 // Allocate the hidden symbol which is used to identify the hidden properties
2070 // in JSObjects. The hash code has a special value so that it will not match
2071 // the empty string when searching for the property. It cannot be part of the
2072 // loop above because it needs to be allocated manually with the special
2073 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2074 // that it will always be at the first entry in property descriptors.
John Reck59135872010-11-02 12:39:01 -07002075 { MaybeObject* maybe_obj =
2076 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2077 if (!maybe_obj->ToObject(&obj)) return false;
2078 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002079 hidden_symbol_ = String::cast(obj);
2080
2081 // Allocate the proxy for __proto__.
John Reck59135872010-11-02 12:39:01 -07002082 { MaybeObject* maybe_obj =
2083 AllocateProxy((Address) &Accessors::ObjectPrototype);
2084 if (!maybe_obj->ToObject(&obj)) return false;
2085 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002086 set_prototype_accessors(Proxy::cast(obj));
2087
2088 // Allocate the code_stubs dictionary. The initial size is set to avoid
2089 // expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002090 { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
2091 if (!maybe_obj->ToObject(&obj)) return false;
2092 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002093 set_code_stubs(NumberDictionary::cast(obj));
2094
2095 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2096 // is set to avoid expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002097 { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
2098 if (!maybe_obj->ToObject(&obj)) return false;
2099 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002100 set_non_monomorphic_cache(NumberDictionary::cast(obj));
2101
Kristian Monsen25f61362010-05-21 11:50:48 +01002102 set_instanceof_cache_function(Smi::FromInt(0));
2103 set_instanceof_cache_map(Smi::FromInt(0));
2104 set_instanceof_cache_answer(Smi::FromInt(0));
2105
Steve Blocka7e24c12009-10-30 11:49:00 +00002106 CreateFixedStubs();
2107
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002108 // Allocate the dictionary of intrinsic function names.
John Reck59135872010-11-02 12:39:01 -07002109 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2110 if (!maybe_obj->ToObject(&obj)) return false;
2111 }
2112 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(obj);
2113 if (!maybe_obj->ToObject(&obj)) return false;
2114 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002115 set_intrinsic_function_names(StringDictionary::cast(obj));
2116
Leon Clarkee46be812010-01-19 14:06:41 +00002117 if (InitializeNumberStringCache()->IsFailure()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002118
Steve Block6ded16b2010-05-10 14:33:55 +01002119 // Allocate cache for single character ASCII strings.
John Reck59135872010-11-02 12:39:01 -07002120 { MaybeObject* maybe_obj =
2121 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2122 if (!maybe_obj->ToObject(&obj)) return false;
2123 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002124 set_single_character_string_cache(FixedArray::cast(obj));
2125
2126 // Allocate cache for external strings pointing to native source code.
John Reck59135872010-11-02 12:39:01 -07002127 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2128 if (!maybe_obj->ToObject(&obj)) return false;
2129 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002130 set_natives_source_cache(FixedArray::cast(obj));
2131
2132 // Handling of script id generation is in Factory::NewScript.
2133 set_last_script_id(undefined_value());
2134
2135 // Initialize keyed lookup cache.
2136 KeyedLookupCache::Clear();
2137
2138 // Initialize context slot cache.
2139 ContextSlotCache::Clear();
2140
2141 // Initialize descriptor cache.
2142 DescriptorLookupCache::Clear();
2143
2144 // Initialize compilation cache.
2145 CompilationCache::Clear();
2146
2147 return true;
2148}
2149
2150
John Reck59135872010-11-02 12:39:01 -07002151MaybeObject* Heap::InitializeNumberStringCache() {
Leon Clarkee46be812010-01-19 14:06:41 +00002152 // Compute the size of the number string cache based on the max heap size.
2153 // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
2154 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
2155 int number_string_cache_size = max_semispace_size_ / 512;
2156 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
John Reck59135872010-11-02 12:39:01 -07002157 Object* obj;
2158 MaybeObject* maybe_obj =
2159 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2160 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2161 return maybe_obj;
Leon Clarkee46be812010-01-19 14:06:41 +00002162}
2163
2164
2165void Heap::FlushNumberStringCache() {
2166 // Flush the number to string cache.
2167 int len = number_string_cache()->length();
2168 for (int i = 0; i < len; i++) {
2169 number_string_cache()->set_undefined(i);
2170 }
2171}
2172
2173
Steve Blocka7e24c12009-10-30 11:49:00 +00002174static inline int double_get_hash(double d) {
2175 DoubleRepresentation rep(d);
Leon Clarkee46be812010-01-19 14:06:41 +00002176 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
Steve Blocka7e24c12009-10-30 11:49:00 +00002177}
2178
2179
2180static inline int smi_get_hash(Smi* smi) {
Leon Clarkee46be812010-01-19 14:06:41 +00002181 return smi->value();
Steve Blocka7e24c12009-10-30 11:49:00 +00002182}
2183
2184
Steve Blocka7e24c12009-10-30 11:49:00 +00002185Object* Heap::GetNumberStringCache(Object* number) {
2186 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002187 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002188 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002189 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002190 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002191 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002192 }
2193 Object* key = number_string_cache()->get(hash * 2);
2194 if (key == number) {
2195 return String::cast(number_string_cache()->get(hash * 2 + 1));
2196 } else if (key->IsHeapNumber() &&
2197 number->IsHeapNumber() &&
2198 key->Number() == number->Number()) {
2199 return String::cast(number_string_cache()->get(hash * 2 + 1));
2200 }
2201 return undefined_value();
2202}
2203
2204
2205void Heap::SetNumberStringCache(Object* number, String* string) {
2206 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002207 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002208 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002209 hash = smi_get_hash(Smi::cast(number)) & mask;
Leon Clarke4515c472010-02-03 11:58:03 +00002210 number_string_cache()->set(hash * 2, Smi::cast(number));
Steve Blocka7e24c12009-10-30 11:49:00 +00002211 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002212 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002213 number_string_cache()->set(hash * 2, number);
2214 }
2215 number_string_cache()->set(hash * 2 + 1, string);
2216}
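// The number-string cache is a direct-mapped table stored in a single
// FixedArray: entry i occupies slot 2 * i (the number acting as the key) and
// slot 2 * i + 1 (its cached string). The index is the number's hash masked to
// the table size, so a colliding Set simply overwrites the older entry.
// A worked lookup, with illustrative values only:
//
//   // cache length 16  =>  8 entries  =>  mask = 8 - 1 = 7
//   // Smi 42           =>  hash = 42 & 7 = 2  =>  key slot 4, value slot 5
//
// GetNumberStringCache compares the stored key against the number (by identity
// for Smis, by value for heap numbers) before returning the cached string.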
2217
2218
John Reck59135872010-11-02 12:39:01 -07002219MaybeObject* Heap::NumberToString(Object* number,
2220 bool check_number_string_cache) {
Andrei Popescu402d9372010-02-26 13:31:12 +00002221 Counters::number_to_string_runtime.Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002222 if (check_number_string_cache) {
2223 Object* cached = GetNumberStringCache(number);
2224 if (cached != undefined_value()) {
2225 return cached;
2226 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002227 }
2228
2229 char arr[100];
2230 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2231 const char* str;
2232 if (number->IsSmi()) {
2233 int num = Smi::cast(number)->value();
2234 str = IntToCString(num, buffer);
2235 } else {
2236 double num = HeapNumber::cast(number)->value();
2237 str = DoubleToCString(num, buffer);
2238 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002239
John Reck59135872010-11-02 12:39:01 -07002240 Object* js_string;
2241 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2242 if (maybe_js_string->ToObject(&js_string)) {
2243 SetNumberStringCache(number, String::cast(js_string));
Steve Blocka7e24c12009-10-30 11:49:00 +00002244 }
John Reck59135872010-11-02 12:39:01 -07002245 return maybe_js_string;
Steve Blocka7e24c12009-10-30 11:49:00 +00002246}
2247
2248
Steve Block3ce2e202009-11-05 08:53:23 +00002249Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2250 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2251}
2252
2253
2254Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2255 ExternalArrayType array_type) {
2256 switch (array_type) {
2257 case kExternalByteArray:
2258 return kExternalByteArrayMapRootIndex;
2259 case kExternalUnsignedByteArray:
2260 return kExternalUnsignedByteArrayMapRootIndex;
2261 case kExternalShortArray:
2262 return kExternalShortArrayMapRootIndex;
2263 case kExternalUnsignedShortArray:
2264 return kExternalUnsignedShortArrayMapRootIndex;
2265 case kExternalIntArray:
2266 return kExternalIntArrayMapRootIndex;
2267 case kExternalUnsignedIntArray:
2268 return kExternalUnsignedIntArrayMapRootIndex;
2269 case kExternalFloatArray:
2270 return kExternalFloatArrayMapRootIndex;
2271 default:
2272 UNREACHABLE();
2273 return kUndefinedValueRootIndex;
2274 }
2275}
2276
2277
John Reck59135872010-11-02 12:39:01 -07002278MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01002279 // We need to distinguish minus zero from plus zero, and this cannot be
2280 // done after conversion to int. Doing this by comparing bit
2281 // patterns is faster than using fpclassify() et al.
2282 static const DoubleRepresentation minus_zero(-0.0);
2283
2284 DoubleRepresentation rep(value);
2285 if (rep.bits == minus_zero.bits) {
2286 return AllocateHeapNumber(-0.0, pretenure);
2287 }
2288
2289 int int_value = FastD2I(value);
2290 if (value == int_value && Smi::IsValid(int_value)) {
2291 return Smi::FromInt(int_value);
2292 }
2293
2294 // Materialize the value in the heap.
2295 return AllocateHeapNumber(value, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002296}
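// Rationale for the bit-pattern check in NumberFromDouble: -0.0 == 0.0 under
// IEEE comparison, so an ordinary equality test followed by the Smi path would
// silently turn minus zero into the Smi 0. Comparing the raw 64-bit patterns
// keeps -0.0 as a heap number while still letting every other integral value
// in Smi range be returned as a Smi without allocating.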
2297
2298
John Reck59135872010-11-02 12:39:01 -07002299MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002300 // Statically ensure that it is safe to allocate proxies in paged spaces.
2301 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
2302 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002303 Object* result;
2304 { MaybeObject* maybe_result = Allocate(proxy_map(), space);
2305 if (!maybe_result->ToObject(&result)) return maybe_result;
2306 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002307
2308 Proxy::cast(result)->set_proxy(proxy);
2309 return result;
2310}
2311
2312
John Reck59135872010-11-02 12:39:01 -07002313MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2314 Object* result;
2315 { MaybeObject* maybe_result =
2316 Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2317 if (!maybe_result->ToObject(&result)) return maybe_result;
2318 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002319
2320 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
2321 share->set_name(name);
2322 Code* illegal = Builtins::builtin(Builtins::Illegal);
2323 share->set_code(illegal);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002324 share->set_scope_info(SerializedScopeInfo::Empty());
Steve Blocka7e24c12009-10-30 11:49:00 +00002325 Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
2326 share->set_construct_stub(construct_stub);
2327 share->set_expected_nof_properties(0);
2328 share->set_length(0);
2329 share->set_formal_parameter_count(0);
2330 share->set_instance_class_name(Object_symbol());
2331 share->set_function_data(undefined_value());
2332 share->set_script(undefined_value());
2333 share->set_start_position_and_type(0);
2334 share->set_debug_info(undefined_value());
2335 share->set_inferred_name(empty_string());
2336 share->set_compiler_hints(0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002337 share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002338 share->set_initial_map(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002339 share->set_this_property_assignments_count(0);
2340 share->set_this_property_assignments(undefined_value());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002341 share->set_opt_count(0);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002342 share->set_num_literals(0);
2343 share->set_end_position(0);
2344 share->set_function_token_position(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002345 return result;
2346}
2347
2348
Steve Block1e0659c2011-05-24 12:43:12 +01002349MaybeObject* Heap::AllocateJSMessageObject(String* type,
2350 JSArray* arguments,
2351 int start_position,
2352 int end_position,
2353 Object* script,
2354 Object* stack_trace,
2355 Object* stack_frames) {
2356 Object* result;
2357 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
2358 if (!maybe_result->ToObject(&result)) return maybe_result;
2359 }
2360 JSMessageObject* message = JSMessageObject::cast(result);
2361 message->set_properties(Heap::empty_fixed_array());
2362 message->set_elements(Heap::empty_fixed_array());
2363 message->set_type(type);
2364 message->set_arguments(arguments);
2365 message->set_start_position(start_position);
2366 message->set_end_position(end_position);
2367 message->set_script(script);
2368 message->set_stack_trace(stack_trace);
2369 message->set_stack_frames(stack_frames);
2370 return result;
2371}
2372
2373
2374
Steve Blockd0582a62009-12-15 09:54:21 +00002375// Returns true for a character in a range. Both limits are inclusive.
2376static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2377 // This makes use of unsigned wraparound.
2378 return character - from <= to - from;
2379}
2380
2381
John Reck59135872010-11-02 12:39:01 -07002382MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
2383 uint32_t c1,
2384 uint32_t c2) {
Steve Blockd0582a62009-12-15 09:54:21 +00002385 String* symbol;
2386 // Numeric strings have a different hash algorithm not known by
2387 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2388 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
2389 Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
2390 return symbol;
2391 // Now we know the length is 2, we might as well make use of that fact
2392 // when building the new string.
2393 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2394 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
John Reck59135872010-11-02 12:39:01 -07002395 Object* result;
2396 { MaybeObject* maybe_result = Heap::AllocateRawAsciiString(2);
2397 if (!maybe_result->ToObject(&result)) return maybe_result;
2398 }
Steve Blockd0582a62009-12-15 09:54:21 +00002399 char* dest = SeqAsciiString::cast(result)->GetChars();
2400 dest[0] = c1;
2401 dest[1] = c2;
2402 return result;
2403 } else {
John Reck59135872010-11-02 12:39:01 -07002404 Object* result;
2405 { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(2);
2406 if (!maybe_result->ToObject(&result)) return maybe_result;
2407 }
Steve Blockd0582a62009-12-15 09:54:21 +00002408 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2409 dest[0] = c1;
2410 dest[1] = c2;
2411 return result;
2412 }
2413}
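// The (c1 | c2) <= String::kMaxAsciiCharCodeU test above relies on the ASCII
// limit being one less than a power of two: the bitwise OR of the two
// character codes stays below the limit exactly when each code does
// individually, so a single comparison picks between the one-byte and the
// two-byte representation.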
2414
2415
John Reck59135872010-11-02 12:39:01 -07002416MaybeObject* Heap::AllocateConsString(String* first, String* second) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002417 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002418 if (first_length == 0) {
2419 return second;
2420 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002421
2422 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002423 if (second_length == 0) {
2424 return first;
2425 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002426
2427 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002428
2429 // Optimization for 2-byte strings often used as keys in a decompression
2430 // dictionary. Check whether we already have the string in the symbol
2431 // table to prevent the creation of many unnecessary strings.
2432 if (length == 2) {
2433 unsigned c1 = first->Get(0);
2434 unsigned c2 = second->Get(0);
2435 return MakeOrFindTwoCharacterString(c1, c2);
2436 }
2437
Steve Block6ded16b2010-05-10 14:33:55 +01002438 bool first_is_ascii = first->IsAsciiRepresentation();
2439 bool second_is_ascii = second->IsAsciiRepresentation();
2440 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002441
2442 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002443 // of the new cons string is too large.
2444 if (length > String::kMaxLength || length < 0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002445 Top::context()->mark_out_of_memory();
2446 return Failure::OutOfMemoryException();
2447 }
2448
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002449 bool is_ascii_data_in_two_byte_string = false;
2450 if (!is_ascii) {
2451 // At least one of the strings uses a two-byte representation, so we
2452 // can't use the fast-case code for short ASCII strings below, but
2453 // we can try to save memory if all the characters actually fit in ASCII.
2454 is_ascii_data_in_two_byte_string =
2455 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2456 if (is_ascii_data_in_two_byte_string) {
2457 Counters::string_add_runtime_ext_to_ascii.Increment();
2458 }
2459 }
2460
Steve Blocka7e24c12009-10-30 11:49:00 +00002461 // If the resulting string is small make a flat string.
2462 if (length < String::kMinNonFlatLength) {
2463 ASSERT(first->IsFlat());
2464 ASSERT(second->IsFlat());
2465 if (is_ascii) {
John Reck59135872010-11-02 12:39:01 -07002466 Object* result;
2467 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2468 if (!maybe_result->ToObject(&result)) return maybe_result;
2469 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002470 // Copy the characters into the new object.
2471 char* dest = SeqAsciiString::cast(result)->GetChars();
2472 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002473 const char* src;
2474 if (first->IsExternalString()) {
2475 src = ExternalAsciiString::cast(first)->resource()->data();
2476 } else {
2477 src = SeqAsciiString::cast(first)->GetChars();
2478 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002479 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2480 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002481 if (second->IsExternalString()) {
2482 src = ExternalAsciiString::cast(second)->resource()->data();
2483 } else {
2484 src = SeqAsciiString::cast(second)->GetChars();
2485 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002486 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2487 return result;
2488 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002489 if (is_ascii_data_in_two_byte_string) {
John Reck59135872010-11-02 12:39:01 -07002490 Object* result;
2491 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2492 if (!maybe_result->ToObject(&result)) return maybe_result;
2493 }
Steve Block6ded16b2010-05-10 14:33:55 +01002494 // Copy the characters into the new object.
2495 char* dest = SeqAsciiString::cast(result)->GetChars();
2496 String::WriteToFlat(first, dest, 0, first_length);
2497 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block6ded16b2010-05-10 14:33:55 +01002498 return result;
2499 }
2500
John Reck59135872010-11-02 12:39:01 -07002501 Object* result;
2502 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2503 if (!maybe_result->ToObject(&result)) return maybe_result;
2504 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002505 // Copy the characters into the new object.
2506 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2507 String::WriteToFlat(first, dest, 0, first_length);
2508 String::WriteToFlat(second, dest + first_length, 0, second_length);
2509 return result;
2510 }
2511 }
2512
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002513 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2514 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002515
John Reck59135872010-11-02 12:39:01 -07002516 Object* result;
2517 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2518 if (!maybe_result->ToObject(&result)) return maybe_result;
2519 }
Leon Clarke4515c472010-02-03 11:58:03 +00002520
2521 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002522 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002523 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002524 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002525 cons_string->set_hash_field(String::kEmptyHashField);
2526 cons_string->set_first(first, mode);
2527 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002528 return result;
2529}
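// Summary of the size decision above: results shorter than kMinNonFlatLength
// are eagerly flattened into a fresh sequential string (copying both halves),
// on the theory that a tiny ConsString costs more in later traversals than the
// copy does now. Longer results get a real ConsString, which uses the ASCII
// cons map either when both halves are ASCII or when a two-byte pair turns out
// to contain only ASCII data.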
2530
2531
John Reck59135872010-11-02 12:39:01 -07002532MaybeObject* Heap::AllocateSubString(String* buffer,
Steve Blocka7e24c12009-10-30 11:49:00 +00002533 int start,
Steve Block6ded16b2010-05-10 14:33:55 +01002534 int end,
2535 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002536 int length = end - start;
2537
2538 if (length == 1) {
2539 return Heap::LookupSingleCharacterStringFromCode(
2540 buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002541 } else if (length == 2) {
2542 // Optimization for 2-byte strings often used as keys in a decompression
2543 // dictionary. Check whether we already have the string in the symbol
2544 // table to prevent creation of many unnecessary strings.
2545 unsigned c1 = buffer->Get(start);
2546 unsigned c2 = buffer->Get(start + 1);
2547 return MakeOrFindTwoCharacterString(c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002548 }
2549
2550 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002551 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002552
John Reck59135872010-11-02 12:39:01 -07002553 Object* result;
2554 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
2555 ? AllocateRawAsciiString(length, pretenure)
2556 : AllocateRawTwoByteString(length, pretenure);
2557 if (!maybe_result->ToObject(&result)) return maybe_result;
2558 }
Steve Blockd0582a62009-12-15 09:54:21 +00002559 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002560 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002561 if (buffer->IsAsciiRepresentation()) {
2562 ASSERT(string_result->IsAsciiRepresentation());
2563 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2564 String::WriteToFlat(buffer, dest, start, end);
2565 } else {
2566 ASSERT(string_result->IsTwoByteRepresentation());
2567 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2568 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002569 }
Steve Blockd0582a62009-12-15 09:54:21 +00002570
Steve Blocka7e24c12009-10-30 11:49:00 +00002571 return result;
2572}
2573
2574
John Reck59135872010-11-02 12:39:01 -07002575MaybeObject* Heap::AllocateExternalStringFromAscii(
Steve Blocka7e24c12009-10-30 11:49:00 +00002576 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002577 size_t length = resource->length();
2578 if (length > static_cast<size_t>(String::kMaxLength)) {
2579 Top::context()->mark_out_of_memory();
2580 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002581 }
2582
Steve Blockd0582a62009-12-15 09:54:21 +00002583 Map* map = external_ascii_string_map();
John Reck59135872010-11-02 12:39:01 -07002584 Object* result;
2585 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2586 if (!maybe_result->ToObject(&result)) return maybe_result;
2587 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002588
2589 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002590 external_string->set_length(static_cast<int>(length));
2591 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002592 external_string->set_resource(resource);
2593
2594 return result;
2595}
2596
2597
John Reck59135872010-11-02 12:39:01 -07002598MaybeObject* Heap::AllocateExternalStringFromTwoByte(
Steve Blocka7e24c12009-10-30 11:49:00 +00002599 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002600 size_t length = resource->length();
2601 if (length > static_cast<size_t>(String::kMaxLength)) {
2602 Top::context()->mark_out_of_memory();
2603 return Failure::OutOfMemoryException();
2604 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002605
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002606 // For small strings we check whether the resource contains only
Steve Block9fac8402011-05-12 15:51:54 +01002607 // ASCII characters. If so, we use a different string map.
2608 static const size_t kAsciiCheckLengthLimit = 32;
2609 bool is_ascii = length <= kAsciiCheckLengthLimit &&
2610 String::IsAscii(resource->data(), static_cast<int>(length));
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002611 Map* map = is_ascii ?
2612 Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
John Reck59135872010-11-02 12:39:01 -07002613 Object* result;
2614 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2615 if (!maybe_result->ToObject(&result)) return maybe_result;
2616 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002617
2618 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002619 external_string->set_length(static_cast<int>(length));
2620 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002621 external_string->set_resource(resource);
2622
2623 return result;
2624}
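// Note: the ASCII check above is an optimization, not a requirement. Tagging
// short two-byte resources that happen to hold only ASCII data with the
// "ascii data" map lets later operations (e.g. the cons-string flattening
// earlier in this file) take the sequential-ASCII fast path.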
2625
2626
John Reck59135872010-11-02 12:39:01 -07002627MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002628 if (code <= String::kMaxAsciiCharCode) {
2629 Object* value = Heap::single_character_string_cache()->get(code);
2630 if (value != Heap::undefined_value()) return value;
2631
2632 char buffer[1];
2633 buffer[0] = static_cast<char>(code);
John Reck59135872010-11-02 12:39:01 -07002634 Object* result;
2635 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00002636
John Reck59135872010-11-02 12:39:01 -07002637 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002638 Heap::single_character_string_cache()->set(code, result);
2639 return result;
2640 }
2641
John Reck59135872010-11-02 12:39:01 -07002642 Object* result;
2643 { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(1);
2644 if (!maybe_result->ToObject(&result)) return maybe_result;
2645 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002646 String* answer = String::cast(result);
2647 answer->Set(0, code);
2648 return answer;
2649}
2650
2651
John Reck59135872010-11-02 12:39:01 -07002652MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002653 if (length < 0 || length > ByteArray::kMaxLength) {
2654 return Failure::OutOfMemoryException();
2655 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002656 if (pretenure == NOT_TENURED) {
2657 return AllocateByteArray(length);
2658 }
2659 int size = ByteArray::SizeFor(length);
John Reck59135872010-11-02 12:39:01 -07002660 Object* result;
2661 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2662 ? old_data_space_->AllocateRaw(size)
2663 : lo_space_->AllocateRaw(size);
2664 if (!maybe_result->ToObject(&result)) return maybe_result;
2665 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002666
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002667 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2668 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002669 return result;
2670}
2671
2672
John Reck59135872010-11-02 12:39:01 -07002673MaybeObject* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002674 if (length < 0 || length > ByteArray::kMaxLength) {
2675 return Failure::OutOfMemoryException();
2676 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002677 int size = ByteArray::SizeFor(length);
2678 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002679 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002680 Object* result;
2681 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2682 if (!maybe_result->ToObject(&result)) return maybe_result;
2683 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002684
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002685 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2686 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002687 return result;
2688}
2689
2690
2691void Heap::CreateFillerObjectAt(Address addr, int size) {
2692 if (size == 0) return;
2693 HeapObject* filler = HeapObject::FromAddress(addr);
2694 if (size == kPointerSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01002695 filler->set_map(one_pointer_filler_map());
2696 } else if (size == 2 * kPointerSize) {
2697 filler->set_map(two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002698 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002699 filler->set_map(byte_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002700 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2701 }
2702}
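// Note: filler objects overwrite unused memory with a recognizable map
// (one-pointer, two-pointer, or byte-array filler) so the heap remains
// iterable; a heap walker can step over the dead region as if it were a
// normal object.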
2703
2704
John Reck59135872010-11-02 12:39:01 -07002705MaybeObject* Heap::AllocatePixelArray(int length,
Steve Blocka7e24c12009-10-30 11:49:00 +00002706 uint8_t* external_pointer,
2707 PretenureFlag pretenure) {
2708 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002709 Object* result;
2710 { MaybeObject* maybe_result =
2711 AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
2712 if (!maybe_result->ToObject(&result)) return maybe_result;
2713 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002714
2715 reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
2716 reinterpret_cast<PixelArray*>(result)->set_length(length);
2717 reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);
2718
2719 return result;
2720}
2721
2722
John Reck59135872010-11-02 12:39:01 -07002723MaybeObject* Heap::AllocateExternalArray(int length,
2724 ExternalArrayType array_type,
2725 void* external_pointer,
2726 PretenureFlag pretenure) {
Steve Block3ce2e202009-11-05 08:53:23 +00002727 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002728 Object* result;
2729 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
2730 space,
2731 OLD_DATA_SPACE);
2732 if (!maybe_result->ToObject(&result)) return maybe_result;
2733 }
Steve Block3ce2e202009-11-05 08:53:23 +00002734
2735 reinterpret_cast<ExternalArray*>(result)->set_map(
2736 MapForExternalArrayType(array_type));
2737 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2738 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2739 external_pointer);
2740
2741 return result;
2742}
2743
2744
John Reck59135872010-11-02 12:39:01 -07002745MaybeObject* Heap::CreateCode(const CodeDesc& desc,
2746 Code::Flags flags,
2747 Handle<Object> self_reference) {
Leon Clarkeac952652010-07-15 11:15:24 +01002748 // Allocate ByteArray before the Code object, so that we do not risk
2749 // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002750 Object* reloc_info;
2751 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2752 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
2753 }
Leon Clarkeac952652010-07-15 11:15:24 +01002754
Steve Blocka7e24c12009-10-30 11:49:00 +00002755 // Compute size
Leon Clarkeac952652010-07-15 11:15:24 +01002756 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002757 int obj_size = Code::SizeFor(body_size);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002758 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
John Reck59135872010-11-02 12:39:01 -07002759 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002760 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002761 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002762 } else {
John Reck59135872010-11-02 12:39:01 -07002763 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002764 }
2765
John Reck59135872010-11-02 12:39:01 -07002766 Object* result;
2767 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002768
2769 // Initialize the object
2770 HeapObject::cast(result)->set_map(code_map());
2771 Code* code = Code::cast(result);
2772 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2773 code->set_instruction_size(desc.instr_size);
Leon Clarkeac952652010-07-15 11:15:24 +01002774 code->set_relocation_info(ByteArray::cast(reloc_info));
Steve Blocka7e24c12009-10-30 11:49:00 +00002775 code->set_flags(flags);
Ben Murdochb8e0da22011-05-16 14:20:40 +01002776 if (code->is_call_stub() || code->is_keyed_call_stub()) {
2777 code->set_check_type(RECEIVER_MAP_CHECK);
2778 }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002779 code->set_deoptimization_data(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00002780 // Allow self references to created code object by patching the handle to
2781 // point to the newly allocated Code object.
2782 if (!self_reference.is_null()) {
2783 *(self_reference.location()) = code;
2784 }
2785 // Migrate generated code.
2786 // The generated code can contain Object** values (typically from handles)
2787 // that are dereferenced during the copy to point directly to the actual heap
2788 // objects. These pointers can include references to the code object itself,
2789 // through the self_reference parameter.
2790 code->CopyFrom(desc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002791
2792#ifdef DEBUG
2793 code->Verify();
2794#endif
2795 return code;
2796}
2797
2798
John Reck59135872010-11-02 12:39:01 -07002799MaybeObject* Heap::CopyCode(Code* code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002800 // Allocate an object the same size as the code object.
2801 int obj_size = code->Size();
John Reck59135872010-11-02 12:39:01 -07002802 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002803 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002804 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002805 } else {
John Reck59135872010-11-02 12:39:01 -07002806 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002807 }
2808
John Reck59135872010-11-02 12:39:01 -07002809 Object* result;
2810 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002811
2812 // Copy code object.
2813 Address old_addr = code->address();
2814 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002815 CopyBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002816 // Relocate the copy.
2817 Code* new_code = Code::cast(result);
2818 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2819 new_code->Relocate(new_addr - old_addr);
2820 return new_code;
2821}
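// Note: after the raw CopyBlock, Relocate() applies the address delta
// (new_addr - old_addr) to the copy's relocation entries so that embedded
// absolute addresses remain valid at the new location.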
2822
2823
John Reck59135872010-11-02 12:39:01 -07002824MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Leon Clarkeac952652010-07-15 11:15:24 +01002825 // Allocate ByteArray before the Code object, so that we do not risk
2826 // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002827 Object* reloc_info_array;
2828 { MaybeObject* maybe_reloc_info_array =
2829 AllocateByteArray(reloc_info.length(), TENURED);
2830 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
2831 return maybe_reloc_info_array;
2832 }
2833 }
Leon Clarkeac952652010-07-15 11:15:24 +01002834
2835 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
Steve Block6ded16b2010-05-10 14:33:55 +01002836
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002837 int new_obj_size = Code::SizeFor(new_body_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002838
2839 Address old_addr = code->address();
2840
2841 size_t relocation_offset =
Leon Clarkeac952652010-07-15 11:15:24 +01002842 static_cast<size_t>(code->instruction_end() - old_addr);
Steve Block6ded16b2010-05-10 14:33:55 +01002843
John Reck59135872010-11-02 12:39:01 -07002844 MaybeObject* maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002845 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002846 maybe_result = lo_space_->AllocateRawCode(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002847 } else {
John Reck59135872010-11-02 12:39:01 -07002848 maybe_result = code_space_->AllocateRaw(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002849 }
2850
John Reck59135872010-11-02 12:39:01 -07002851 Object* result;
2852 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002853
2854 // Copy code object.
2855 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2856
2857 // Copy header and instructions.
2858 memcpy(new_addr, old_addr, relocation_offset);
2859
Steve Block6ded16b2010-05-10 14:33:55 +01002860 Code* new_code = Code::cast(result);
Leon Clarkeac952652010-07-15 11:15:24 +01002861 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
Steve Block6ded16b2010-05-10 14:33:55 +01002862
Leon Clarkeac952652010-07-15 11:15:24 +01002863 // Copy patched rinfo.
2864 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
Steve Block6ded16b2010-05-10 14:33:55 +01002865
2866 // Relocate the copy.
2867 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2868 new_code->Relocate(new_addr - old_addr);
2869
2870#ifdef DEBUG
2871 code->Verify();
2872#endif
2873 return new_code;
2874}
2875
2876
John Reck59135872010-11-02 12:39:01 -07002877MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002878 ASSERT(gc_state_ == NOT_IN_GC);
2879 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00002880 // If allocation failures are disallowed, we may allocate in a different
2881 // space when new space is full and the object is not a large object.
2882 AllocationSpace retry_space =
2883 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
John Reck59135872010-11-02 12:39:01 -07002884 Object* result;
2885 { MaybeObject* maybe_result =
2886 AllocateRaw(map->instance_size(), space, retry_space);
2887 if (!maybe_result->ToObject(&result)) return maybe_result;
2888 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002889 HeapObject::cast(result)->set_map(map);
Steve Block3ce2e202009-11-05 08:53:23 +00002890#ifdef ENABLE_LOGGING_AND_PROFILING
2891 ProducerHeapProfile::RecordJSObjectAllocation(result);
2892#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002893 return result;
2894}
2895
2896
John Reck59135872010-11-02 12:39:01 -07002897MaybeObject* Heap::InitializeFunction(JSFunction* function,
2898 SharedFunctionInfo* shared,
2899 Object* prototype) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002900 ASSERT(!prototype->IsMap());
2901 function->initialize_properties();
2902 function->initialize_elements();
2903 function->set_shared(shared);
Iain Merrick75681382010-08-19 15:07:18 +01002904 function->set_code(shared->code());
Steve Blocka7e24c12009-10-30 11:49:00 +00002905 function->set_prototype_or_initial_map(prototype);
2906 function->set_context(undefined_value());
Leon Clarke4515c472010-02-03 11:58:03 +00002907 function->set_literals(empty_fixed_array());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002908 function->set_next_function_link(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002909 return function;
2910}
2911
2912
John Reck59135872010-11-02 12:39:01 -07002913MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002914 // Allocate the prototype. Make sure to use the object function
2915 // from the function's context, since the function can be from a
2916 // different context.
2917 JSFunction* object_function =
2918 function->context()->global_context()->object_function();
John Reck59135872010-11-02 12:39:01 -07002919 Object* prototype;
2920 { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
2921 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2922 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002923 // When creating the prototype for the function we must set its
2924 // constructor to the function.
John Reck59135872010-11-02 12:39:01 -07002925 Object* result;
2926 { MaybeObject* maybe_result =
2927 JSObject::cast(prototype)->SetProperty(constructor_symbol(),
2928 function,
2929 DONT_ENUM);
2930 if (!maybe_result->ToObject(&result)) return maybe_result;
2931 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002932 return prototype;
2933}
2934
2935
John Reck59135872010-11-02 12:39:01 -07002936MaybeObject* Heap::AllocateFunction(Map* function_map,
2937 SharedFunctionInfo* shared,
2938 Object* prototype,
2939 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002940 AllocationSpace space =
2941 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002942 Object* result;
2943 { MaybeObject* maybe_result = Allocate(function_map, space);
2944 if (!maybe_result->ToObject(&result)) return maybe_result;
2945 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002946 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2947}
2948
2949
John Reck59135872010-11-02 12:39:01 -07002950MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002951 // To get fast allocation and map sharing for arguments objects we
2952 // allocate them based on an arguments boilerplate.
2953
2954 // This calls Copy directly rather than using Heap::AllocateRaw so we
2955 // duplicate the check here.
2956 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2957
2958 JSObject* boilerplate =
2959 Top::context()->global_context()->arguments_boilerplate();
2960
Leon Clarkee46be812010-01-19 14:06:41 +00002961 // Check that the size of the boilerplate matches our
2962 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2963 // on the size being a known constant.
2964 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
2965
2966 // Do the allocation.
John Reck59135872010-11-02 12:39:01 -07002967 Object* result;
2968 { MaybeObject* maybe_result =
2969 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
2970 if (!maybe_result->ToObject(&result)) return maybe_result;
2971 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002972
2973 // Copy the content. The arguments boilerplate doesn't have any
2974 // fields that point to new space so it's safe to skip the write
2975 // barrier here.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002976 CopyBlock(HeapObject::cast(result)->address(),
2977 boilerplate->address(),
Leon Clarkee46be812010-01-19 14:06:41 +00002978 kArgumentsObjectSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002979
2980 // Set the two properties.
2981 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
2982 callee);
2983 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
2984 Smi::FromInt(length),
2985 SKIP_WRITE_BARRIER);
2986
2987 // Check the state of the object
2988 ASSERT(JSObject::cast(result)->HasFastProperties());
2989 ASSERT(JSObject::cast(result)->HasFastElements());
2990
2991 return result;
2992}
2993
2994
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002995static bool HasDuplicates(DescriptorArray* descriptors) {
2996 int count = descriptors->number_of_descriptors();
2997 if (count > 1) {
2998 String* prev_key = descriptors->GetKey(0);
2999 for (int i = 1; i != count; i++) {
3000 String* current_key = descriptors->GetKey(i);
3001 if (prev_key == current_key) return true;
3002 prev_key = current_key;
3003 }
3004 }
3005 return false;
3006}
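// Note: HasDuplicates only compares neighbouring keys, so it is meaningful
// only on a sorted DescriptorArray; AllocateInitialMap below calls it right
// after SortUnchecked() for exactly that reason.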
3007
3008
John Reck59135872010-11-02 12:39:01 -07003009MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003010 ASSERT(!fun->has_initial_map());
3011
3012 // First create a new map with the size and number of in-object properties
3013 // suggested by the function.
3014 int instance_size = fun->shared()->CalculateInstanceSize();
3015 int in_object_properties = fun->shared()->CalculateInObjectProperties();
John Reck59135872010-11-02 12:39:01 -07003016 Object* map_obj;
3017 { MaybeObject* maybe_map_obj =
3018 Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
3019 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
3020 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003021
3022 // Fetch or allocate prototype.
3023 Object* prototype;
3024 if (fun->has_instance_prototype()) {
3025 prototype = fun->instance_prototype();
3026 } else {
John Reck59135872010-11-02 12:39:01 -07003027 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3028 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3029 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003030 }
3031 Map* map = Map::cast(map_obj);
3032 map->set_inobject_properties(in_object_properties);
3033 map->set_unused_property_fields(in_object_properties);
3034 map->set_prototype(prototype);
Steve Block8defd9f2010-07-08 12:39:36 +01003035 ASSERT(map->has_fast_elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003036
Andrei Popescu402d9372010-02-26 13:31:12 +00003037 // If the function has only simple this property assignments add
3038 // field descriptors for these to the initial map as the object
3039 // cannot be constructed without having these properties. Guard by
3040 // the inline_new flag so we only change the map if we generate a
3041 // specialized construct stub.
Steve Blocka7e24c12009-10-30 11:49:00 +00003042 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Andrei Popescu402d9372010-02-26 13:31:12 +00003043 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003044 int count = fun->shared()->this_property_assignments_count();
3045 if (count > in_object_properties) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003046 // Inline constructor can only handle inobject properties.
3047 fun->shared()->ForbidInlineConstructor();
3048 } else {
John Reck59135872010-11-02 12:39:01 -07003049 Object* descriptors_obj;
3050 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3051 if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
3052 return maybe_descriptors_obj;
3053 }
3054 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003055 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
3056 for (int i = 0; i < count; i++) {
3057 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3058 ASSERT(name->IsSymbol());
3059 FieldDescriptor field(name, i, NONE);
3060 field.SetEnumerationIndex(i);
3061 descriptors->Set(i, &field);
3062 }
3063 descriptors->SetNextEnumerationIndex(count);
3064 descriptors->SortUnchecked();
3065
3066 // The descriptors may contain duplicates because the compiler does not
3067 // guarantee the uniqueness of property names (it would have required
3068 // quadratic time). Once the descriptors are sorted we can check for
3069 // duplicates in linear time.
3070 if (HasDuplicates(descriptors)) {
3071 fun->shared()->ForbidInlineConstructor();
3072 } else {
3073 map->set_instance_descriptors(descriptors);
3074 map->set_pre_allocated_property_fields(count);
3075 map->set_unused_property_fields(in_object_properties - count);
3076 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003077 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003078 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003079
3080 fun->shared()->StartInobjectSlackTracking(map);
3081
Steve Blocka7e24c12009-10-30 11:49:00 +00003082 return map;
3083}
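// Note: StartInobjectSlackTracking arms the initial map for in-object slack
// tracking: instances start with the generously estimated size, and unused
// in-object fields can be trimmed away once enough objects have been
// constructed (which is also why InitializeJSObjectFromMap below may fill
// with the one-pointer filler instead of undefined).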
3084
3085
3086void Heap::InitializeJSObjectFromMap(JSObject* obj,
3087 FixedArray* properties,
3088 Map* map) {
3089 obj->set_properties(properties);
3090 obj->initialize_elements();
3091 // TODO(1240798): Initialize the object's body using valid initial values
3092 // according to the object's initial map. For example, if the map's
3093 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3094 // to a number (e.g., Smi::FromInt(0)) and the elements initialized to a
3095 // fixed array (e.g., Heap::empty_fixed_array()). Currently, the object
3096 // verification code has to cope with (temporarily) invalid objects. See,
3097 // for example, JSArray::JSArrayVerify.
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003098 Object* filler;
3099 // We cannot always fill with one_pointer_filler_map because objects
3100 // created from API functions expect their internal fields to be initialized
3101 // with undefined_value.
3102 if (map->constructor()->IsJSFunction() &&
3103 JSFunction::cast(map->constructor())->shared()->
3104 IsInobjectSlackTrackingInProgress()) {
3105 // We might want to shrink the object later.
3106 ASSERT(obj->GetInternalFieldCount() == 0);
3107 filler = Heap::one_pointer_filler_map();
3108 } else {
3109 filler = Heap::undefined_value();
3110 }
3111 obj->InitializeBody(map->instance_size(), filler);
Steve Blocka7e24c12009-10-30 11:49:00 +00003112}
3113
3114
John Reck59135872010-11-02 12:39:01 -07003115MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003116 // JSFunctions should be allocated using AllocateFunction to be
3117 // properly initialized.
3118 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3119
Steve Block8defd9f2010-07-08 12:39:36 +01003120 // Both types of global objects should be allocated using
3121 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00003122 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3123 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3124
3125 // Allocate the backing storage for the properties.
3126 int prop_size =
3127 map->pre_allocated_property_fields() +
3128 map->unused_property_fields() -
3129 map->inobject_properties();
3130 ASSERT(prop_size >= 0);
John Reck59135872010-11-02 12:39:01 -07003131 Object* properties;
3132 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3133 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3134 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003135
3136 // Allocate the JSObject.
3137 AllocationSpace space =
3138 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3139 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
John Reck59135872010-11-02 12:39:01 -07003140 Object* obj;
3141 { MaybeObject* maybe_obj = Allocate(map, space);
3142 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3143 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003144
3145 // Initialize the JSObject.
3146 InitializeJSObjectFromMap(JSObject::cast(obj),
3147 FixedArray::cast(properties),
3148 map);
Steve Block8defd9f2010-07-08 12:39:36 +01003149 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003150 return obj;
3151}
3152
3153
John Reck59135872010-11-02 12:39:01 -07003154MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3155 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003156 // Allocate the initial map if absent.
3157 if (!constructor->has_initial_map()) {
John Reck59135872010-11-02 12:39:01 -07003158 Object* initial_map;
3159 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3160 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3161 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003162 constructor->set_initial_map(Map::cast(initial_map));
3163 Map::cast(initial_map)->set_constructor(constructor);
3164 }
3165 // Allocate the object based on the constructor's initial map.
John Reck59135872010-11-02 12:39:01 -07003166 MaybeObject* result =
Steve Blocka7e24c12009-10-30 11:49:00 +00003167 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
John Reck59135872010-11-02 12:39:01 -07003168#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00003169 // Make sure result is NOT a global object if valid.
John Reck59135872010-11-02 12:39:01 -07003170 Object* non_failure;
3171 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3172#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003173 return result;
3174}
3175
3176
John Reck59135872010-11-02 12:39:01 -07003177MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003178 ASSERT(constructor->has_initial_map());
3179 Map* map = constructor->initial_map();
3180
3181 // Make sure no field properties are described in the initial map.
3182 // This guarantees us that normalizing the properties does not
3183 // require us to change property values to JSGlobalPropertyCells.
3184 ASSERT(map->NextFreePropertyIndex() == 0);
3185
3186 // Make sure we don't have a ton of pre-allocated slots in the
3187 // global objects. They will be unused once we normalize the object.
3188 ASSERT(map->unused_property_fields() == 0);
3189 ASSERT(map->inobject_properties() == 0);
3190
3191 // Initial size of the backing store to avoid resizing the storage during
3192 // bootstrapping. The size differs between the JS global object and the
3193 // builtins object.
3194 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3195
3196 // Allocate a dictionary object for backing storage.
John Reck59135872010-11-02 12:39:01 -07003197 Object* obj;
3198 { MaybeObject* maybe_obj =
3199 StringDictionary::Allocate(
3200 map->NumberOfDescribedProperties() * 2 + initial_size);
3201 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3202 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003203 StringDictionary* dictionary = StringDictionary::cast(obj);
3204
3205 // The global object might be created from an object template with accessors.
3206 // Fill these accessors into the dictionary.
3207 DescriptorArray* descs = map->instance_descriptors();
3208 for (int i = 0; i < descs->number_of_descriptors(); i++) {
3209 PropertyDetails details = descs->GetDetails(i);
3210 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3211 PropertyDetails d =
3212 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3213 Object* value = descs->GetCallbacksObject(i);
John Reck59135872010-11-02 12:39:01 -07003214 { MaybeObject* maybe_value = Heap::AllocateJSGlobalPropertyCell(value);
3215 if (!maybe_value->ToObject(&value)) return maybe_value;
3216 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003217
John Reck59135872010-11-02 12:39:01 -07003218 Object* result;
3219 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3220 if (!maybe_result->ToObject(&result)) return maybe_result;
3221 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003222 dictionary = StringDictionary::cast(result);
3223 }
3224
3225 // Allocate the global object and initialize it with the backing store.
John Reck59135872010-11-02 12:39:01 -07003226 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3227 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3228 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003229 JSObject* global = JSObject::cast(obj);
3230 InitializeJSObjectFromMap(global, dictionary, map);
3231
3232 // Create a new map for the global object.
John Reck59135872010-11-02 12:39:01 -07003233 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3234 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3235 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003236 Map* new_map = Map::cast(obj);
3237
3238 // Set up the global object as a normalized object.
3239 global->set_map(new_map);
3240 global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
3241 global->set_properties(dictionary);
3242
3243 // Make sure result is a global object with properties in dictionary.
3244 ASSERT(global->IsGlobalObject());
3245 ASSERT(!global->HasFastProperties());
3246 return global;
3247}
3248
3249
John Reck59135872010-11-02 12:39:01 -07003250MaybeObject* Heap::CopyJSObject(JSObject* source) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003251 // Never used to copy functions. If functions need to be copied we
3252 // have to be careful to clear the literals array.
3253 ASSERT(!source->IsJSFunction());
3254
3255 // Make the clone.
3256 Map* map = source->map();
3257 int object_size = map->instance_size();
3258 Object* clone;
3259
3260 // If we're forced to always allocate, we use the general allocation
3261 // functions which may leave us with an object in old space.
3262 if (always_allocate()) {
John Reck59135872010-11-02 12:39:01 -07003263 { MaybeObject* maybe_clone =
3264 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3265 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3266 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003267 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003268 CopyBlock(clone_address,
3269 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003270 object_size);
3271 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01003272 RecordWrites(clone_address,
3273 JSObject::kHeaderSize,
3274 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003275 } else {
John Reck59135872010-11-02 12:39:01 -07003276 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3277 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3278 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003279 ASSERT(Heap::InNewSpace(clone));
3280 // Since we know the clone is allocated in new space, we can copy
3281 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003282 CopyBlock(HeapObject::cast(clone)->address(),
3283 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003284 object_size);
3285 }
3286
3287 FixedArray* elements = FixedArray::cast(source->elements());
3288 FixedArray* properties = FixedArray::cast(source->properties());
3289 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01003290 if (elements->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003291 Object* elem;
3292 { MaybeObject* maybe_elem =
3293 (elements->map() == fixed_cow_array_map()) ?
3294 elements : CopyFixedArray(elements);
3295 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3296 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003297 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
3298 }
3299 // Update properties if necessary.
3300 if (properties->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003301 Object* prop;
3302 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3303 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3304 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003305 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3306 }
3307 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00003308#ifdef ENABLE_LOGGING_AND_PROFILING
3309 ProducerHeapProfile::RecordJSObjectAllocation(clone);
3310#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003311 return clone;
3312}
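// Note: copy-on-write element backing stores (fixed_cow_array_map) are shared
// with the source object rather than copied; any other non-empty elements or
// properties array gets a fresh FixedArray so the clone and the source do not
// alias mutable state.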
3313
3314
John Reck59135872010-11-02 12:39:01 -07003315MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3316 JSGlobalProxy* object) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003317 ASSERT(constructor->has_initial_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003318 Map* map = constructor->initial_map();
3319
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003320 // Check that the already allocated object has the same size and type as
Steve Blocka7e24c12009-10-30 11:49:00 +00003321 // objects allocated using the constructor.
3322 ASSERT(map->instance_size() == object->map()->instance_size());
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003323 ASSERT(map->instance_type() == object->map()->instance_type());
Steve Blocka7e24c12009-10-30 11:49:00 +00003324
3325 // Allocate the backing storage for the properties.
3326 int prop_size = map->unused_property_fields() - map->inobject_properties();
John Reck59135872010-11-02 12:39:01 -07003327 Object* properties;
3328 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3329 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3330 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003331
3332 // Reset the map for the object.
3333 object->set_map(constructor->initial_map());
3334
3335 // Reinitialize the object from the constructor map.
3336 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3337 return object;
3338}
3339
3340
John Reck59135872010-11-02 12:39:01 -07003341MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
3342 PretenureFlag pretenure) {
3343 Object* result;
3344 { MaybeObject* maybe_result =
3345 AllocateRawAsciiString(string.length(), pretenure);
3346 if (!maybe_result->ToObject(&result)) return maybe_result;
3347 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003348
3349 // Copy the characters into the new object.
3350 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3351 for (int i = 0; i < string.length(); i++) {
3352 string_result->SeqAsciiStringSet(i, string[i]);
3353 }
3354 return result;
3355}
3356
3357
Steve Block9fac8402011-05-12 15:51:54 +01003358MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
3359 PretenureFlag pretenure) {
Leon Clarkeac952652010-07-15 11:15:24 +01003360 // V8 only supports characters in the Basic Multilingual Plane.
3361 const uc32 kMaxSupportedChar = 0xFFFF;
Steve Blocka7e24c12009-10-30 11:49:00 +00003362 // Count the number of characters in the UTF-8 string and check if
3363 // it is an ASCII string.
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08003364 Access<ScannerConstants::Utf8Decoder>
3365 decoder(ScannerConstants::utf8_decoder());
Steve Blocka7e24c12009-10-30 11:49:00 +00003366 decoder->Reset(string.start(), string.length());
3367 int chars = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003368 while (decoder->has_more()) {
Steve Block9fac8402011-05-12 15:51:54 +01003369 decoder->GetNext();
Steve Blocka7e24c12009-10-30 11:49:00 +00003370 chars++;
3371 }
3372
John Reck59135872010-11-02 12:39:01 -07003373 Object* result;
3374 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3375 if (!maybe_result->ToObject(&result)) return maybe_result;
3376 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003377
3378 // Convert and copy the characters into the new object.
3379 String* string_result = String::cast(result);
3380 decoder->Reset(string.start(), string.length());
3381 for (int i = 0; i < chars; i++) {
3382 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003383 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003384 string_result->Set(i, r);
3385 }
3386 return result;
3387}
3388
3389
John Reck59135872010-11-02 12:39:01 -07003390MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3391 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003392 // Check if the string is an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003393 MaybeObject* maybe_result;
Steve Block9fac8402011-05-12 15:51:54 +01003394 if (String::IsAscii(string.start(), string.length())) {
John Reck59135872010-11-02 12:39:01 -07003395 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003396 } else { // It's not an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003397 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003398 }
John Reck59135872010-11-02 12:39:01 -07003399 Object* result;
3400 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003401
3402 // Copy the characters into the new object, which may be either ASCII or
3403 // UTF-16.
3404 String* string_result = String::cast(result);
3405 for (int i = 0; i < string.length(); i++) {
3406 string_result->Set(i, string[i]);
3407 }
3408 return result;
3409}
3410
3411
3412Map* Heap::SymbolMapForString(String* string) {
3413 // If the string is in new space it cannot be used as a symbol.
3414 if (InNewSpace(string)) return NULL;
3415
3416 // Find the corresponding symbol map for strings.
3417 Map* map = string->map();
Steve Blockd0582a62009-12-15 09:54:21 +00003418 if (map == ascii_string_map()) return ascii_symbol_map();
3419 if (map == string_map()) return symbol_map();
3420 if (map == cons_string_map()) return cons_symbol_map();
3421 if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
3422 if (map == external_string_map()) return external_symbol_map();
3423 if (map == external_ascii_string_map()) return external_ascii_symbol_map();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01003424 if (map == external_string_with_ascii_data_map()) {
3425 return external_symbol_with_ascii_data_map();
3426 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003427
3428 // No match found.
3429 return NULL;
3430}
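// Note: a NULL result means the string cannot be turned into a symbol simply
// by swapping its map in place (it lives in new space or has no matching
// symbol map); callers then fall back to allocating a fresh symbol object.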
3431
3432
John Reck59135872010-11-02 12:39:01 -07003433MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3434 int chars,
3435 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00003436 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003437 // Ensure the chars matches the number of characters in the buffer.
3438 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3439 // Determine whether the string is ASCII.
3440 bool is_ascii = true;
Leon Clarkee46be812010-01-19 14:06:41 +00003441 while (buffer->has_more()) {
3442 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3443 is_ascii = false;
3444 break;
3445 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003446 }
3447 buffer->Rewind();
3448
3449 // Compute map and object size.
3450 int size;
3451 Map* map;
3452
3453 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00003454 if (chars > SeqAsciiString::kMaxLength) {
3455 return Failure::OutOfMemoryException();
3456 }
Steve Blockd0582a62009-12-15 09:54:21 +00003457 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003458 size = SeqAsciiString::SizeFor(chars);
3459 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00003460 if (chars > SeqTwoByteString::kMaxLength) {
3461 return Failure::OutOfMemoryException();
3462 }
Steve Blockd0582a62009-12-15 09:54:21 +00003463 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003464 size = SeqTwoByteString::SizeFor(chars);
3465 }
3466
3467 // Allocate string.
John Reck59135872010-11-02 12:39:01 -07003468 Object* result;
3469 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3470 ? lo_space_->AllocateRaw(size)
3471 : old_data_space_->AllocateRaw(size);
3472 if (!maybe_result->ToObject(&result)) return maybe_result;
3473 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003474
3475 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00003476 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00003477 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00003478 answer->set_length(chars);
3479 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00003480
3481 ASSERT_EQ(size, answer->Size());
3482
3483 // Fill in the characters.
3484 for (int i = 0; i < chars; i++) {
3485 answer->Set(i, buffer->GetNext());
3486 }
3487 return answer;
3488}
3489
3490
John Reck59135872010-11-02 12:39:01 -07003491MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003492 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3493 return Failure::OutOfMemoryException();
3494 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003495
3496 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003497 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003498
Leon Clarkee46be812010-01-19 14:06:41 +00003499 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3500 AllocationSpace retry_space = OLD_DATA_SPACE;
3501
Steve Blocka7e24c12009-10-30 11:49:00 +00003502 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00003503 if (size > kMaxObjectSizeInNewSpace) {
3504 // Allocate in large object space, retry space will be ignored.
3505 space = LO_SPACE;
3506 } else if (size > MaxObjectSizeInPagedSpace()) {
3507 // Allocate in new space, retry in large object space.
3508 retry_space = LO_SPACE;
3509 }
3510 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3511 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003512 }
John Reck59135872010-11-02 12:39:01 -07003513 Object* result;
3514 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3515 if (!maybe_result->ToObject(&result)) return maybe_result;
3516 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003517
Steve Blocka7e24c12009-10-30 11:49:00 +00003518 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003519 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003520 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003521 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003522 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3523 return result;
3524}
3525
3526
John Reck59135872010-11-02 12:39:01 -07003527MaybeObject* Heap::AllocateRawTwoByteString(int length,
3528 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003529 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3530 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00003531 }
Leon Clarkee46be812010-01-19 14:06:41 +00003532 int size = SeqTwoByteString::SizeFor(length);
3533 ASSERT(size <= SeqTwoByteString::kMaxSize);
3534 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3535 AllocationSpace retry_space = OLD_DATA_SPACE;
3536
3537 if (space == NEW_SPACE) {
3538 if (size > kMaxObjectSizeInNewSpace) {
3539 // Allocate in large object space, retry space will be ignored.
3540 space = LO_SPACE;
3541 } else if (size > MaxObjectSizeInPagedSpace()) {
3542 // Allocate in new space, retry in large object space.
3543 retry_space = LO_SPACE;
3544 }
3545 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3546 space = LO_SPACE;
3547 }
John Reck59135872010-11-02 12:39:01 -07003548 Object* result;
3549 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3550 if (!maybe_result->ToObject(&result)) return maybe_result;
3551 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003552
Steve Blocka7e24c12009-10-30 11:49:00 +00003553 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003554 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003555 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003556 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003557 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3558 return result;
3559}
3560
3561
John Reck59135872010-11-02 12:39:01 -07003562MaybeObject* Heap::AllocateEmptyFixedArray() {
Steve Blocka7e24c12009-10-30 11:49:00 +00003563 int size = FixedArray::SizeFor(0);
John Reck59135872010-11-02 12:39:01 -07003564 Object* result;
3565 { MaybeObject* maybe_result =
3566 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3567 if (!maybe_result->ToObject(&result)) return maybe_result;
3568 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003569 // Initialize the object.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003570 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3571 reinterpret_cast<FixedArray*>(result)->set_length(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003572 return result;
3573}
3574
3575
John Reck59135872010-11-02 12:39:01 -07003576MaybeObject* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00003577 if (length < 0 || length > FixedArray::kMaxLength) {
3578 return Failure::OutOfMemoryException();
3579 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003580 ASSERT(length > 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003581 // Use the general function if we're forced to always allocate.
3582 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3583 // Allocate the raw data for a fixed array.
3584 int size = FixedArray::SizeFor(length);
3585 return size <= kMaxObjectSizeInNewSpace
3586 ? new_space_.AllocateRaw(size)
3587 : lo_space_->AllocateRawFixedArray(size);
3588}
3589
3590
John Reck59135872010-11-02 12:39:01 -07003591MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003592 int len = src->length();
John Reck59135872010-11-02 12:39:01 -07003593 Object* obj;
3594 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3595 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3596 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003597 if (Heap::InNewSpace(obj)) {
3598 HeapObject* dst = HeapObject::cast(obj);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003599 dst->set_map(map);
3600 CopyBlock(dst->address() + kPointerSize,
3601 src->address() + kPointerSize,
3602 FixedArray::SizeFor(len) - kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003603 return obj;
3604 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003605 HeapObject::cast(obj)->set_map(map);
Steve Blocka7e24c12009-10-30 11:49:00 +00003606 FixedArray* result = FixedArray::cast(obj);
3607 result->set_length(len);
Leon Clarke4515c472010-02-03 11:58:03 +00003608
Steve Blocka7e24c12009-10-30 11:49:00 +00003609 // Copy the content
Leon Clarke4515c472010-02-03 11:58:03 +00003610 AssertNoAllocation no_gc;
3611 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00003612 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3613 return result;
3614}
3615
3616
John Reck59135872010-11-02 12:39:01 -07003617MaybeObject* Heap::AllocateFixedArray(int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003618 ASSERT(length >= 0);
3619 if (length == 0) return empty_fixed_array();
John Reck59135872010-11-02 12:39:01 -07003620 Object* result;
3621 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
3622 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003623 }
John Reck59135872010-11-02 12:39:01 -07003624 // Initialize header.
3625 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3626 array->set_map(fixed_array_map());
3627 array->set_length(length);
3628 // Initialize body.
3629 ASSERT(!Heap::InNewSpace(undefined_value()));
3630 MemsetPointer(array->data_start(), undefined_value(), length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003631 return result;
3632}
3633
3634
John Reck59135872010-11-02 12:39:01 -07003635MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003636 if (length < 0 || length > FixedArray::kMaxLength) {
3637 return Failure::OutOfMemoryException();
3638 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003639
Leon Clarkee46be812010-01-19 14:06:41 +00003640 AllocationSpace space =
3641 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003642 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003643 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3644 // Too big for new space.
3645 space = LO_SPACE;
3646 } else if (space == OLD_POINTER_SPACE &&
3647 size > MaxObjectSizeInPagedSpace()) {
3648 // Too big for old pointer space.
3649 space = LO_SPACE;
3650 }
3651
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003652 AllocationSpace retry_space =
3653 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3654
3655 return AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003656}
3657
3658
John Reck59135872010-11-02 12:39:01 -07003659MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
3660 int length,
3661 PretenureFlag pretenure,
3662 Object* filler) {
Steve Block6ded16b2010-05-10 14:33:55 +01003663 ASSERT(length >= 0);
3664 ASSERT(Heap::empty_fixed_array()->IsFixedArray());
3665 if (length == 0) return Heap::empty_fixed_array();
3666
3667 ASSERT(!Heap::InNewSpace(filler));
John Reck59135872010-11-02 12:39:01 -07003668 Object* result;
3669 { MaybeObject* maybe_result = Heap::AllocateRawFixedArray(length, pretenure);
3670 if (!maybe_result->ToObject(&result)) return maybe_result;
3671 }
Steve Block6ded16b2010-05-10 14:33:55 +01003672
3673 HeapObject::cast(result)->set_map(Heap::fixed_array_map());
3674 FixedArray* array = FixedArray::cast(result);
3675 array->set_length(length);
3676 MemsetPointer(array->data_start(), filler, length);
3677 return array;
3678}
3679
3680
John Reck59135872010-11-02 12:39:01 -07003681MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01003682 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
3683}
3684
3685
John Reck59135872010-11-02 12:39:01 -07003686MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
3687 PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01003688 return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
3689}
3690
3691
John Reck59135872010-11-02 12:39:01 -07003692MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
Steve Block6ded16b2010-05-10 14:33:55 +01003693 if (length == 0) return empty_fixed_array();
3694
John Reck59135872010-11-02 12:39:01 -07003695 Object* obj;
3696 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
3697 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3698 }
Steve Block6ded16b2010-05-10 14:33:55 +01003699
3700 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3701 FixedArray::cast(obj)->set_length(length);
3702 return obj;
3703}
3704
3705
John Reck59135872010-11-02 12:39:01 -07003706MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3707 Object* result;
3708 { MaybeObject* maybe_result = Heap::AllocateFixedArray(length, pretenure);
3709 if (!maybe_result->ToObject(&result)) return maybe_result;
3710 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003711 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003712 ASSERT(result->IsHashTable());
3713 return result;
3714}
3715
3716
John Reck59135872010-11-02 12:39:01 -07003717MaybeObject* Heap::AllocateGlobalContext() {
3718 Object* result;
3719 { MaybeObject* maybe_result =
3720 Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3721 if (!maybe_result->ToObject(&result)) return maybe_result;
3722 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003723 Context* context = reinterpret_cast<Context*>(result);
3724 context->set_map(global_context_map());
3725 ASSERT(context->IsGlobalContext());
3726 ASSERT(result->IsContext());
3727 return result;
3728}
3729
3730
John Reck59135872010-11-02 12:39:01 -07003731MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003732 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003733 Object* result;
3734 { MaybeObject* maybe_result = Heap::AllocateFixedArray(length);
3735 if (!maybe_result->ToObject(&result)) return maybe_result;
3736 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003737 Context* context = reinterpret_cast<Context*>(result);
3738 context->set_map(context_map());
3739 context->set_closure(function);
3740 context->set_fcontext(context);
3741 context->set_previous(NULL);
3742 context->set_extension(NULL);
3743 context->set_global(function->context()->global());
3744 ASSERT(!context->IsGlobalContext());
3745 ASSERT(context->is_function_context());
3746 ASSERT(result->IsContext());
3747 return result;
3748}
3749
3750
John Reck59135872010-11-02 12:39:01 -07003751MaybeObject* Heap::AllocateWithContext(Context* previous,
3752 JSObject* extension,
3753 bool is_catch_context) {
3754 Object* result;
3755 { MaybeObject* maybe_result =
3756 Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
3757 if (!maybe_result->ToObject(&result)) return maybe_result;
3758 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003759 Context* context = reinterpret_cast<Context*>(result);
3760 context->set_map(is_catch_context ? catch_context_map() : context_map());
3761 context->set_closure(previous->closure());
3762 context->set_fcontext(previous->fcontext());
3763 context->set_previous(previous);
3764 context->set_extension(extension);
3765 context->set_global(previous->global());
3766 ASSERT(!context->IsGlobalContext());
3767 ASSERT(!context->is_function_context());
3768 ASSERT(result->IsContext());
3769 return result;
3770}
3771
3772
John Reck59135872010-11-02 12:39:01 -07003773MaybeObject* Heap::AllocateStruct(InstanceType type) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003774 Map* map;
3775 switch (type) {
3776#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
3777STRUCT_LIST(MAKE_CASE)
3778#undef MAKE_CASE
3779 default:
3780 UNREACHABLE();
3781 return Failure::InternalError();
3782 }
3783 int size = map->instance_size();
3784 AllocationSpace space =
3785 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
John Reck59135872010-11-02 12:39:01 -07003786 Object* result;
3787 { MaybeObject* maybe_result = Heap::Allocate(map, space);
3788 if (!maybe_result->ToObject(&result)) return maybe_result;
3789 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003790 Struct::cast(result)->InitializeBody(size);
3791 return result;
3792}
3793
3794
3795bool Heap::IdleNotification() {
3796 static const int kIdlesBeforeScavenge = 4;
3797 static const int kIdlesBeforeMarkSweep = 7;
3798 static const int kIdlesBeforeMarkCompact = 8;
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003799 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
3800 static const int kGCsBetweenCleanup = 4;
Steve Blocka7e24c12009-10-30 11:49:00 +00003801 static int number_idle_notifications = 0;
3802 static int last_gc_count = gc_count_;
3803
Steve Block6ded16b2010-05-10 14:33:55 +01003804 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003805 bool finished = false;
3806
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003807 // Reset the number of idle notifications received when a number of
3808 // GCs have taken place. This allows another round of cleanup based
3809 // on idle notifications if enough work has been carried out to
3810 // provoke a number of garbage collections.
3811 if (gc_count_ < last_gc_count + kGCsBetweenCleanup) {
3812 number_idle_notifications =
3813 Min(number_idle_notifications + 1, kMaxIdleCount);
Steve Blocka7e24c12009-10-30 11:49:00 +00003814 } else {
3815 number_idle_notifications = 0;
3816 last_gc_count = gc_count_;
3817 }
3818
3819 if (number_idle_notifications == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003820 if (contexts_disposed_ > 0) {
3821 HistogramTimerScope scope(&Counters::gc_context);
3822 CollectAllGarbage(false);
3823 } else {
Ben Murdochf87a2032010-10-22 12:50:53 +01003824 CollectGarbage(NEW_SPACE);
Steve Block6ded16b2010-05-10 14:33:55 +01003825 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003826 new_space_.Shrink();
3827 last_gc_count = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003828 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003829 // Before doing the mark-sweep collections we clear the
3830 // compilation cache to avoid hanging on to source code and
3831 // generated code for cached functions.
3832 CompilationCache::Clear();
3833
Steve Blocka7e24c12009-10-30 11:49:00 +00003834 CollectAllGarbage(false);
3835 new_space_.Shrink();
3836 last_gc_count = gc_count_;
3837
3838 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
3839 CollectAllGarbage(true);
3840 new_space_.Shrink();
3841 last_gc_count = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003842 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003843
3844 } else if (contexts_disposed_ > 0) {
3845 if (FLAG_expose_gc) {
3846 contexts_disposed_ = 0;
3847 } else {
3848 HistogramTimerScope scope(&Counters::gc_context);
3849 CollectAllGarbage(false);
3850 last_gc_count = gc_count_;
3851 }
3852 // If this is the first idle notification, we reset the
3853 // notification count to avoid letting idle notifications for
3854 // context disposal garbage collections start a potentially too
3855 // aggressive idle GC cycle.
3856 if (number_idle_notifications <= 1) {
3857 number_idle_notifications = 0;
3858 uncommit = false;
3859 }
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003860 } else if (number_idle_notifications > kIdlesBeforeMarkCompact) {
3861 // If we have received more than kIdlesBeforeMarkCompact idle
3862 // notifications we do not perform any cleanup because we don't
3863 // expect to gain much by doing so.
3864 finished = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003865 }
3866
Steve Block6ded16b2010-05-10 14:33:55 +01003867 // Make sure that we have no pending context disposals and
3868 // conditionally uncommit from space.
3869 ASSERT(contexts_disposed_ == 0);
3870 if (uncommit) Heap::UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003871 return finished;
3872}
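
// The idle heuristic above, roughly: after 4 consecutive idle notifications
// do a scavenge (or a full GC if contexts were disposed), after 7 do a
// mark-sweep with the compilation cache cleared, after 8 do a compacting
// collection and report that cleanup is finished; the counter is reset once
// kGCsBetweenCleanup collections have happened for other reasons.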
3873
3874
3875#ifdef DEBUG
3876
3877void Heap::Print() {
3878 if (!HasBeenSetup()) return;
3879 Top::PrintStack();
3880 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003881 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3882 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003883}
3884
3885
3886void Heap::ReportCodeStatistics(const char* title) {
3887 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3888 PagedSpace::ResetCodeStatistics();
3889 // We do not look for code in new space, map space, or old space. If code
3890 // somehow ends up in those spaces, we would miss it here.
3891 code_space_->CollectCodeStatistics();
3892 lo_space_->CollectCodeStatistics();
3893 PagedSpace::ReportCodeStatistics();
3894}
3895
3896
3897// This function expects that NewSpace's allocated objects histogram is
3898// populated (via a call to CollectStatistics or else as a side effect of a
3899// just-completed scavenge collection).
3900void Heap::ReportHeapStatistics(const char* title) {
3901 USE(title);
3902 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3903 title, gc_count_);
3904 PrintF("mark-compact GC : %d\n", mc_count_);
Ben Murdochf87a2032010-10-22 12:50:53 +01003905 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
3906 old_gen_promotion_limit_);
3907 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
3908 old_gen_allocation_limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003909
3910 PrintF("\n");
3911 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
3912 GlobalHandles::PrintStats();
3913 PrintF("\n");
3914
3915 PrintF("Heap statistics : ");
3916 MemoryAllocator::ReportStatistics();
3917 PrintF("To space : ");
3918 new_space_.ReportStatistics();
3919 PrintF("Old pointer space : ");
3920 old_pointer_space_->ReportStatistics();
3921 PrintF("Old data space : ");
3922 old_data_space_->ReportStatistics();
3923 PrintF("Code space : ");
3924 code_space_->ReportStatistics();
3925 PrintF("Map space : ");
3926 map_space_->ReportStatistics();
3927 PrintF("Cell space : ");
3928 cell_space_->ReportStatistics();
3929 PrintF("Large object space : ");
3930 lo_space_->ReportStatistics();
3931 PrintF(">>>>>> ========================================= >>>>>>\n");
3932}
3933
3934#endif // DEBUG
3935
3936bool Heap::Contains(HeapObject* value) {
3937 return Contains(value->address());
3938}
3939
3940
3941bool Heap::Contains(Address addr) {
3942 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3943 return HasBeenSetup() &&
3944 (new_space_.ToSpaceContains(addr) ||
3945 old_pointer_space_->Contains(addr) ||
3946 old_data_space_->Contains(addr) ||
3947 code_space_->Contains(addr) ||
3948 map_space_->Contains(addr) ||
3949 cell_space_->Contains(addr) ||
3950 lo_space_->SlowContains(addr));
3951}
3952
3953
3954bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3955 return InSpace(value->address(), space);
3956}
3957
3958
3959bool Heap::InSpace(Address addr, AllocationSpace space) {
3960 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3961 if (!HasBeenSetup()) return false;
3962
3963 switch (space) {
3964 case NEW_SPACE:
3965 return new_space_.ToSpaceContains(addr);
3966 case OLD_POINTER_SPACE:
3967 return old_pointer_space_->Contains(addr);
3968 case OLD_DATA_SPACE:
3969 return old_data_space_->Contains(addr);
3970 case CODE_SPACE:
3971 return code_space_->Contains(addr);
3972 case MAP_SPACE:
3973 return map_space_->Contains(addr);
3974 case CELL_SPACE:
3975 return cell_space_->Contains(addr);
3976 case LO_SPACE:
3977 return lo_space_->SlowContains(addr);
3978 }
3979
3980 return false;
3981}
3982
3983
3984#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003985static void DummyScavengePointer(HeapObject** p) {
3986}
3987
3988
3989static void VerifyPointersUnderWatermark(
3990 PagedSpace* space,
3991 DirtyRegionCallback visit_dirty_region) {
3992 PageIterator it(space, PageIterator::PAGES_IN_USE);
3993
3994 while (it.has_next()) {
3995 Page* page = it.next();
3996 Address start = page->ObjectAreaStart();
3997 Address end = page->AllocationWatermark();
3998
3999 Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
4000 start,
4001 end,
4002 visit_dirty_region,
4003 &DummyScavengePointer);
4004 }
4005}
4006
4007
4008static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
4009 LargeObjectIterator it(space);
4010 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
4011 if (object->IsFixedArray()) {
4012 Address slot_address = object->address();
4013 Address end = object->address() + object->Size();
4014
4015 while (slot_address < end) {
4016 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
4017        // When we are not in a GC, the Heap::InNewSpace() predicate
4018        // checks that any pointer satisfying it points into
4019        // the active semispace.

4020 Heap::InNewSpace(*slot);
4021 slot_address += kPointerSize;
4022 }
4023 }
4024 }
4025}
4026
4027
Steve Blocka7e24c12009-10-30 11:49:00 +00004028void Heap::Verify() {
4029 ASSERT(HasBeenSetup());
4030
4031 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00004032 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00004033
4034 new_space_.Verify();
4035
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004036 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
4037 old_pointer_space_->Verify(&dirty_regions_visitor);
4038 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004039
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004040 VerifyPointersUnderWatermark(old_pointer_space_,
4041 &IteratePointersInDirtyRegion);
4042 VerifyPointersUnderWatermark(map_space_,
4043 &IteratePointersInDirtyMapsRegion);
4044 VerifyPointersUnderWatermark(lo_space_);
4045
4046 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
4047 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
4048
4049 VerifyPointersVisitor no_dirty_regions_visitor;
4050 old_data_space_->Verify(&no_dirty_regions_visitor);
4051 code_space_->Verify(&no_dirty_regions_visitor);
4052 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004053
4054 lo_space_->Verify();
4055}
4056#endif // DEBUG
4057
4058
John Reck59135872010-11-02 12:39:01 -07004059MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004060 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004061 Object* new_table;
4062 { MaybeObject* maybe_new_table =
4063 symbol_table()->LookupSymbol(string, &symbol);
4064 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4065 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004066 // Can't use set_symbol_table because SymbolTable::cast knows that
4067 // SymbolTable is a singleton and checks for identity.
4068 roots_[kSymbolTableRootIndex] = new_table;
4069 ASSERT(symbol != NULL);
4070 return symbol;
4071}
4072
4073
Steve Block9fac8402011-05-12 15:51:54 +01004074MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
4075 Object* symbol = NULL;
4076 Object* new_table;
4077 { MaybeObject* maybe_new_table =
4078 symbol_table()->LookupAsciiSymbol(string, &symbol);
4079 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4080 }
4081 // Can't use set_symbol_table because SymbolTable::cast knows that
4082 // SymbolTable is a singleton and checks for identity.
4083 roots_[kSymbolTableRootIndex] = new_table;
4084 ASSERT(symbol != NULL);
4085 return symbol;
4086}
4087
4088
4089MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
4090 Object* symbol = NULL;
4091 Object* new_table;
4092 { MaybeObject* maybe_new_table =
4093 symbol_table()->LookupTwoByteSymbol(string, &symbol);
4094 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4095 }
4096 // Can't use set_symbol_table because SymbolTable::cast knows that
4097 // SymbolTable is a singleton and checks for identity.
4098 roots_[kSymbolTableRootIndex] = new_table;
4099 ASSERT(symbol != NULL);
4100 return symbol;
4101}
4102
4103
John Reck59135872010-11-02 12:39:01 -07004104MaybeObject* Heap::LookupSymbol(String* string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004105 if (string->IsSymbol()) return string;
4106 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004107 Object* new_table;
4108 { MaybeObject* maybe_new_table =
4109 symbol_table()->LookupString(string, &symbol);
4110 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4111 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004112 // Can't use set_symbol_table because SymbolTable::cast knows that
4113 // SymbolTable is a singleton and checks for identity.
4114 roots_[kSymbolTableRootIndex] = new_table;
4115 ASSERT(symbol != NULL);
4116 return symbol;
4117}
4118
4119
4120bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4121 if (string->IsSymbol()) {
4122 *symbol = string;
4123 return true;
4124 }
4125 return symbol_table()->LookupSymbolIfExists(string, symbol);
4126}
4127
4128
4129#ifdef DEBUG
4130void Heap::ZapFromSpace() {
Steve Block1e0659c2011-05-24 12:43:12 +01004131 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
Steve Blocka7e24c12009-10-30 11:49:00 +00004132 for (Address a = new_space_.FromSpaceLow();
4133 a < new_space_.FromSpaceHigh();
4134 a += kPointerSize) {
4135 Memory::Address_at(a) = kFromSpaceZapValue;
4136 }
4137}
4138#endif // DEBUG
4139
4140
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004141bool Heap::IteratePointersInDirtyRegion(Address start,
4142 Address end,
4143 ObjectSlotCallback copy_object_func) {
4144 Address slot_address = start;
4145 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004146
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004147 while (slot_address < end) {
4148 Object** slot = reinterpret_cast<Object**>(slot_address);
4149 if (Heap::InNewSpace(*slot)) {
4150 ASSERT((*slot)->IsHeapObject());
4151 copy_object_func(reinterpret_cast<HeapObject**>(slot));
4152 if (Heap::InNewSpace(*slot)) {
4153 ASSERT((*slot)->IsHeapObject());
4154 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004155 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004156 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004157 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00004158 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004159 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00004160}
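
// The return value feeds the region-mark bookkeeping in
// IterateDirtyRegions below: true means the region must stay marked dirty,
// because at least one slot still points into new space after the copy
// callback has run.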
4161
4162
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004163// Compute start address of the first map following given addr.
4164static inline Address MapStartAlign(Address addr) {
4165 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4166 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4167}
Steve Blocka7e24c12009-10-30 11:49:00 +00004168
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004169
4170// Compute end address of the first map preceding given addr.
4171static inline Address MapEndAlign(Address addr) {
4172 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4173 return page + ((addr - page) / Map::kSize * Map::kSize);
4174}
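
// Worked example with made-up numbers (purely illustrative): if the object
// area starts at page offset 0 and Map::kSize were 0x58, then for an addr
// 0x100 bytes into the page MapStartAlign rounds up to the next map
// boundary at offset 0x108, while MapEndAlign rounds down to the previous
// one at offset 0xB0; together they bracket the run of whole maps inside a
// [start, end) range.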
4175
4176
4177static bool IteratePointersInDirtyMaps(Address start,
4178 Address end,
4179 ObjectSlotCallback copy_object_func) {
4180 ASSERT(MapStartAlign(start) == start);
4181 ASSERT(MapEndAlign(end) == end);
4182
4183 Address map_address = start;
4184 bool pointers_to_new_space_found = false;
4185
4186 while (map_address < end) {
4187 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
4188 ASSERT(Memory::Object_at(map_address)->IsMap());
4189
4190 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4191 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4192
4193 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
4194 pointer_fields_end,
4195 copy_object_func)) {
4196 pointers_to_new_space_found = true;
4197 }
4198
4199 map_address += Map::kSize;
4200 }
4201
4202 return pointers_to_new_space_found;
4203}
4204
4205
4206bool Heap::IteratePointersInDirtyMapsRegion(
4207 Address start,
4208 Address end,
4209 ObjectSlotCallback copy_object_func) {
4210 Address map_aligned_start = MapStartAlign(start);
4211 Address map_aligned_end = MapEndAlign(end);
4212
4213 bool contains_pointers_to_new_space = false;
4214
4215 if (map_aligned_start != start) {
4216 Address prev_map = map_aligned_start - Map::kSize;
4217 ASSERT(Memory::Object_at(prev_map)->IsMap());
4218
4219 Address pointer_fields_start =
4220 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4221
4222 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004223 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004224
4225 contains_pointers_to_new_space =
4226 IteratePointersInDirtyRegion(pointer_fields_start,
4227 pointer_fields_end,
4228 copy_object_func)
4229 || contains_pointers_to_new_space;
4230 }
4231
4232 contains_pointers_to_new_space =
4233 IteratePointersInDirtyMaps(map_aligned_start,
4234 map_aligned_end,
4235 copy_object_func)
4236 || contains_pointers_to_new_space;
4237
4238 if (map_aligned_end != end) {
4239 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4240
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004241 Address pointer_fields_start =
4242 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004243
4244 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004245 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004246
4247 contains_pointers_to_new_space =
4248 IteratePointersInDirtyRegion(pointer_fields_start,
4249 pointer_fields_end,
4250 copy_object_func)
4251 || contains_pointers_to_new_space;
4252 }
4253
4254 return contains_pointers_to_new_space;
4255}
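
// The region is handled in up to three pieces above: a leading partial map
// (only the pointer fields that overlap the region), the run of whole maps
// in the middle, and a trailing partial map. This way dirty-region scanning
// of map space never visits the non-pointer words of a Map.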
4256
4257
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004258void Heap::IterateAndMarkPointersToFromSpace(Address start,
4259 Address end,
4260 ObjectSlotCallback callback) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004261 Address slot_address = start;
4262 Page* page = Page::FromAddress(start);
4263
4264 uint32_t marks = page->GetRegionMarks();
4265
4266 while (slot_address < end) {
4267 Object** slot = reinterpret_cast<Object**>(slot_address);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004268 if (Heap::InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004269 ASSERT((*slot)->IsHeapObject());
4270 callback(reinterpret_cast<HeapObject**>(slot));
4271 if (Heap::InNewSpace(*slot)) {
4272 ASSERT((*slot)->IsHeapObject());
4273 marks |= page->GetRegionMaskForAddress(slot_address);
4274 }
4275 }
4276 slot_address += kPointerSize;
4277 }
4278
4279 page->SetRegionMarks(marks);
4280}
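
// Unlike IteratePointersInDirtyRegion, this variant only reacts to slots
// that still point into from-space, and it re-dirties the page's region
// mark for any slot that remains a new-space pointer after the callback
// has run.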
4281
4282
4283uint32_t Heap::IterateDirtyRegions(
4284 uint32_t marks,
4285 Address area_start,
4286 Address area_end,
4287 DirtyRegionCallback visit_dirty_region,
4288 ObjectSlotCallback copy_object_func) {
4289 uint32_t newmarks = 0;
4290 uint32_t mask = 1;
4291
4292 if (area_start >= area_end) {
4293 return newmarks;
4294 }
4295
4296 Address region_start = area_start;
4297
4298  // area_start does not necessarily coincide with the start of the first
4299  // region. Thus, to find the beginning of the next region, we round
4300  // area_start up to a Page::kRegionSize boundary.
4301 Address second_region =
4302 reinterpret_cast<Address>(
4303 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4304 ~Page::kRegionAlignmentMask);
4305
4306 // Next region might be beyond area_end.
4307 Address region_end = Min(second_region, area_end);
4308
4309 if (marks & mask) {
4310 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
4311 newmarks |= mask;
4312 }
4313 }
4314 mask <<= 1;
4315
4316  // Iterate subsequent regions which lie fully inside [area_start, area_end).
4317 region_start = region_end;
4318 region_end = region_start + Page::kRegionSize;
4319
4320 while (region_end <= area_end) {
4321 if (marks & mask) {
4322 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
4323 newmarks |= mask;
4324 }
4325 }
4326
4327 region_start = region_end;
4328 region_end = region_start + Page::kRegionSize;
4329
4330 mask <<= 1;
4331 }
4332
4333 if (region_start != area_end) {
4334  // A small piece of the area was left unvisited because area_end does not
4335  // coincide with a region end. Check whether the region covering the last
4336  // part of the area is dirty.
4337 if (marks & mask) {
4338 if (visit_dirty_region(region_start, area_end, copy_object_func)) {
4339 newmarks |= mask;
4340 }
4341 }
4342 }
4343
4344 return newmarks;
4345}
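
// Each bit of 'marks' corresponds to one Page::kRegionSize-aligned region
// overlapping [area_start, area_end); a bit survives into the returned mask
// only if the visitor reported that the region still contains new-space
// pointers. For example, if only the first and third regions of a
// region-aligned area remain dirty, the function returns binary 101.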
4346
4347
4348
4349void Heap::IterateDirtyRegions(
4350 PagedSpace* space,
4351 DirtyRegionCallback visit_dirty_region,
4352 ObjectSlotCallback copy_object_func,
4353 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004354
4355 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004356
Steve Blocka7e24c12009-10-30 11:49:00 +00004357 while (it.has_next()) {
4358 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004359 uint32_t marks = page->GetRegionMarks();
4360
4361 if (marks != Page::kAllRegionsCleanMarks) {
4362 Address start = page->ObjectAreaStart();
4363
4364      // Do not try to visit pointers beyond the page's allocation watermark.
4365      // The page can contain garbage pointers there.
4366 Address end;
4367
4368 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4369 page->IsWatermarkValid()) {
4370 end = page->AllocationWatermark();
4371 } else {
4372 end = page->CachedAllocationWatermark();
4373 }
4374
4375 ASSERT(space == old_pointer_space_ ||
4376 (space == map_space_ &&
4377 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4378
4379 page->SetRegionMarks(IterateDirtyRegions(marks,
4380 start,
4381 end,
4382 visit_dirty_region,
4383 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00004384 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004385
4386 // Mark page watermark as invalid to maintain watermark validity invariant.
4387 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4388 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00004389 }
4390}
4391
4392
Steve Blockd0582a62009-12-15 09:54:21 +00004393void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4394 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00004395 IterateWeakRoots(v, mode);
4396}
4397
4398
4399void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004400 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00004401 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00004402 if (mode != VISIT_ALL_IN_SCAVENGE) {
4403 // Scavenge collections have special processing for this.
4404 ExternalStringTable::Iterate(v);
4405 }
4406 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00004407}
4408
4409
Steve Blockd0582a62009-12-15 09:54:21 +00004410void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004411 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004412 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004413
Iain Merrick75681382010-08-19 15:07:18 +01004414 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004415 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004416
4417 Bootstrapper::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004418 v->Synchronize("bootstrapper");
Steve Blocka7e24c12009-10-30 11:49:00 +00004419 Top::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004420 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004421 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004422 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004423
4424#ifdef ENABLE_DEBUGGER_SUPPORT
4425 Debug::Iterate(v);
4426#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004427 v->Synchronize("debug");
Steve Blocka7e24c12009-10-30 11:49:00 +00004428 CompilationCache::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004429 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004430
4431 // Iterate over local handles in handle scopes.
4432 HandleScopeImplementer::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004433 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004434
Leon Clarkee46be812010-01-19 14:06:41 +00004435 // Iterate over the builtin code objects and code stubs in the
4436 // heap. Note that it is not necessary to iterate over code objects
4437 // on scavenge collections.
4438 if (mode != VISIT_ALL_IN_SCAVENGE) {
4439 Builtins::IterateBuiltins(v);
4440 }
Steve Blockd0582a62009-12-15 09:54:21 +00004441 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004442
4443 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004444 if (mode == VISIT_ONLY_STRONG) {
4445 GlobalHandles::IterateStrongRoots(v);
4446 } else {
4447 GlobalHandles::IterateAllRoots(v);
4448 }
4449 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004450
4451 // Iterate over pointers being held by inactive threads.
4452 ThreadManager::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004453 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004454
4455 // Iterate over the pointers the Serialization/Deserialization code is
4456 // holding.
4457 // During garbage collection this keeps the partial snapshot cache alive.
4458 // During deserialization of the startup snapshot this creates the partial
4459 // snapshot cache and deserializes the objects it refers to. During
4460 // serialization this does nothing, since the partial snapshot cache is
4461 // empty. However the next thing we do is create the partial snapshot,
4462 // filling up the partial snapshot cache with objects it needs as we go.
4463 SerializerDeserializer::Iterate(v);
4464 // We don't do a v->Synchronize call here, because in debug mode that will
4465 // output a flag to the snapshot. However at this point the serializer and
4466 // deserializer are deliberately a little unsynchronized (see above) so the
4467 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004468}
Steve Blocka7e24c12009-10-30 11:49:00 +00004469
4470
4471// Flag is set when the heap has been configured. The heap can be repeatedly
4472// configured through the API until it is setup.
4473static bool heap_configured = false;
4474
4475// TODO(1236194): Since the heap size is configurable on the command line
4476// and through the API, we should gracefully handle the case that the heap
4477// size is not big enough to fit all the initial objects.
Russell Brenner90bac252010-11-18 13:33:46 -08004478bool Heap::ConfigureHeap(int max_semispace_size,
4479 int max_old_gen_size,
4480 int max_executable_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004481 if (HasBeenSetup()) return false;
4482
Steve Block3ce2e202009-11-05 08:53:23 +00004483 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4484
4485 if (Snapshot::IsEnabled()) {
4486 // If we are using a snapshot we always reserve the default amount
4487 // of memory for each semispace because code in the snapshot has
4488 // write-barrier code that relies on the size and alignment of new
4489 // space. We therefore cannot use a larger max semispace size
4490 // than the default reserved semispace size.
4491 if (max_semispace_size_ > reserved_semispace_size_) {
4492 max_semispace_size_ = reserved_semispace_size_;
4493 }
4494 } else {
4495 // If we are not using snapshots we reserve space for the actual
4496 // max semispace size.
4497 reserved_semispace_size_ = max_semispace_size_;
4498 }
4499
4500 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Russell Brenner90bac252010-11-18 13:33:46 -08004501 if (max_executable_size > 0) {
4502 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
4503 }
4504
4505 // The max executable size must be less than or equal to the max old
4506 // generation size.
4507 if (max_executable_size_ > max_old_generation_size_) {
4508 max_executable_size_ = max_old_generation_size_;
4509 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004510
4511 // The new space size must be a power of two to support single-bit testing
4512 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00004513 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4514 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4515 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4516 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00004517
4518 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004519 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004520
4521 heap_configured = true;
4522 return true;
4523}
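
// Example of the rounding behaviour, with illustrative values: asking for a
// 3 MB max semispace yields a 4 MB semispace (rounded up to a power of
// two), the external allocation limit becomes ten times that, and the old
// generation limit is rounded up to a whole number of pages.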
4524
4525
4526bool Heap::ConfigureHeapDefault() {
Russell Brenner90bac252010-11-18 13:33:46 -08004527 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
4528 FLAG_max_old_space_size * MB,
4529 FLAG_max_executable_size * MB);
Steve Blocka7e24c12009-10-30 11:49:00 +00004530}
4531
4532
Ben Murdochbb769b22010-08-11 14:56:33 +01004533void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Iain Merrick75681382010-08-19 15:07:18 +01004534 *stats->start_marker = HeapStats::kStartMarker;
4535 *stats->end_marker = HeapStats::kEndMarker;
Ben Murdochf87a2032010-10-22 12:50:53 +01004536 *stats->new_space_size = new_space_.SizeAsInt();
4537 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Steve Blockd0582a62009-12-15 09:54:21 +00004538 *stats->old_pointer_space_size = old_pointer_space_->Size();
4539 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4540 *stats->old_data_space_size = old_data_space_->Size();
4541 *stats->old_data_space_capacity = old_data_space_->Capacity();
4542 *stats->code_space_size = code_space_->Size();
4543 *stats->code_space_capacity = code_space_->Capacity();
4544 *stats->map_space_size = map_space_->Size();
4545 *stats->map_space_capacity = map_space_->Capacity();
4546 *stats->cell_space_size = cell_space_->Size();
4547 *stats->cell_space_capacity = cell_space_->Capacity();
4548 *stats->lo_space_size = lo_space_->Size();
4549 GlobalHandles::RecordStats(stats);
Ben Murdochbb769b22010-08-11 14:56:33 +01004550 *stats->memory_allocator_size = MemoryAllocator::Size();
4551 *stats->memory_allocator_capacity =
4552 MemoryAllocator::Size() + MemoryAllocator::Available();
Iain Merrick75681382010-08-19 15:07:18 +01004553 *stats->os_error = OS::GetLastError();
Ben Murdochbb769b22010-08-11 14:56:33 +01004554 if (take_snapshot) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01004555 HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
Ben Murdochbb769b22010-08-11 14:56:33 +01004556 for (HeapObject* obj = iterator.next();
4557 obj != NULL;
4558 obj = iterator.next()) {
Ben Murdochbb769b22010-08-11 14:56:33 +01004559 InstanceType type = obj->map()->instance_type();
4560 ASSERT(0 <= type && type <= LAST_TYPE);
4561 stats->objects_per_type[type]++;
4562 stats->size_per_type[type] += obj->Size();
4563 }
4564 }
Steve Blockd0582a62009-12-15 09:54:21 +00004565}
4566
4567
Ben Murdochf87a2032010-10-22 12:50:53 +01004568intptr_t Heap::PromotedSpaceSize() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004569 return old_pointer_space_->Size()
4570 + old_data_space_->Size()
4571 + code_space_->Size()
4572 + map_space_->Size()
4573 + cell_space_->Size()
4574 + lo_space_->Size();
4575}
4576
4577
4578int Heap::PromotedExternalMemorySize() {
4579 if (amount_of_external_allocated_memory_
4580 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4581 return amount_of_external_allocated_memory_
4582 - amount_of_external_allocated_memory_at_last_global_gc_;
4583}
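
// In other words this is max(0, current - at_last_global_gc): with, say,
// 12 MB of external memory registered now and 8 MB recorded at the last
// global GC, the function reports 4 MB of growth.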
4584
4585
4586bool Heap::Setup(bool create_heap_objects) {
4587 // Initialize heap spaces and initial maps and objects. Whenever something
4588 // goes wrong, just return false. The caller should check the results and
4589 // call Heap::TearDown() to release allocated memory.
4590 //
4591  // If the heap is not yet configured (e.g., through the API), configure it.
4592 // Configuration is based on the flags new-space-size (really the semispace
4593 // size) and old-space-size if set or the initial values of semispace_size_
4594 // and old_generation_size_ otherwise.
4595 if (!heap_configured) {
4596 if (!ConfigureHeapDefault()) return false;
4597 }
4598
Iain Merrick75681382010-08-19 15:07:18 +01004599 ScavengingVisitor::Initialize();
4600 NewSpaceScavenger::Initialize();
4601 MarkCompactCollector::Initialize();
4602
Kristian Monsen80d68ea2010-09-08 11:05:35 +01004603 MarkMapPointersAsEncoded(false);
4604
Steve Blocka7e24c12009-10-30 11:49:00 +00004605 // Setup memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00004606 // space. The chunk is double the size of the requested reserved
4607 // new space size to ensure that we can find a pair of semispaces that
4608 // are contiguous and aligned to their size.
Russell Brenner90bac252010-11-18 13:33:46 -08004609 if (!MemoryAllocator::Setup(MaxReserved(), MaxExecutableSize())) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004610 void* chunk =
Steve Block3ce2e202009-11-05 08:53:23 +00004611 MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004612 if (chunk == NULL) return false;
4613
4614 // Align the pair of semispaces to their size, which must be a power
4615 // of 2.
Steve Blocka7e24c12009-10-30 11:49:00 +00004616 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00004617 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4618 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4619 return false;
4620 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004621
4622 // Initialize old pointer space.
4623 old_pointer_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004624 new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004625 if (old_pointer_space_ == NULL) return false;
4626 if (!old_pointer_space_->Setup(NULL, 0)) return false;
4627
4628 // Initialize old data space.
4629 old_data_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004630 new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004631 if (old_data_space_ == NULL) return false;
4632 if (!old_data_space_->Setup(NULL, 0)) return false;
4633
4634 // Initialize the code space, set its maximum capacity to the old
4635 // generation size. It needs executable memory.
4636 // On 64-bit platform(s), we put all code objects in a 2 GB range of
4637 // virtual address space, so that they can call each other with near calls.
4638 if (code_range_size_ > 0) {
4639 if (!CodeRange::Setup(code_range_size_)) {
4640 return false;
4641 }
4642 }
4643
4644 code_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004645 new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004646 if (code_space_ == NULL) return false;
4647 if (!code_space_->Setup(NULL, 0)) return false;
4648
4649 // Initialize map space.
Leon Clarkee46be812010-01-19 14:06:41 +00004650 map_space_ = new MapSpace(FLAG_use_big_map_space
4651 ? max_old_generation_size_
Leon Clarked91b9f72010-01-27 17:25:45 +00004652 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
4653 FLAG_max_map_space_pages,
Leon Clarkee46be812010-01-19 14:06:41 +00004654 MAP_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004655 if (map_space_ == NULL) return false;
4656 if (!map_space_->Setup(NULL, 0)) return false;
4657
4658 // Initialize global property cell space.
Steve Block3ce2e202009-11-05 08:53:23 +00004659 cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004660 if (cell_space_ == NULL) return false;
4661 if (!cell_space_->Setup(NULL, 0)) return false;
4662
4663 // The large object code space may contain code or data. We set the memory
4664 // to be non-executable here for safety, but this means we need to enable it
4665 // explicitly when allocating large code objects.
4666 lo_space_ = new LargeObjectSpace(LO_SPACE);
4667 if (lo_space_ == NULL) return false;
4668 if (!lo_space_->Setup()) return false;
4669
4670 if (create_heap_objects) {
4671 // Create initial maps.
4672 if (!CreateInitialMaps()) return false;
4673 if (!CreateApiObjects()) return false;
4674
4675 // Create initial objects
4676 if (!CreateInitialObjects()) return false;
Ben Murdochf87a2032010-10-22 12:50:53 +01004677
4678 global_contexts_list_ = undefined_value();
Steve Blocka7e24c12009-10-30 11:49:00 +00004679 }
4680
Ben Murdochf87a2032010-10-22 12:50:53 +01004681 LOG(IntPtrTEvent("heap-capacity", Capacity()));
4682 LOG(IntPtrTEvent("heap-available", Available()));
Steve Blocka7e24c12009-10-30 11:49:00 +00004683
Steve Block3ce2e202009-11-05 08:53:23 +00004684#ifdef ENABLE_LOGGING_AND_PROFILING
4685 // This should be called only after initial objects have been created.
4686 ProducerHeapProfile::Setup();
4687#endif
4688
Steve Blocka7e24c12009-10-30 11:49:00 +00004689 return true;
4690}
4691
4692
Steve Blockd0582a62009-12-15 09:54:21 +00004693void Heap::SetStackLimits() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004694 // On 64 bit machines, pointers are generally out of range of Smis. We write
4695 // something that looks like an out of range Smi to the GC.
4696
Steve Blockd0582a62009-12-15 09:54:21 +00004697 // Set up the special root array entries containing the stack limits.
4698 // These are actually addresses, but the tag makes the GC ignore it.
Steve Blocka7e24c12009-10-30 11:49:00 +00004699 roots_[kStackLimitRootIndex] =
Steve Blockd0582a62009-12-15 09:54:21 +00004700 reinterpret_cast<Object*>(
4701 (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
4702 roots_[kRealStackLimitRootIndex] =
4703 reinterpret_cast<Object*>(
4704 (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blocka7e24c12009-10-30 11:49:00 +00004705}
4706
4707
4708void Heap::TearDown() {
Leon Clarkef7060e22010-06-03 12:02:55 +01004709 if (FLAG_print_cumulative_gc_stat) {
4710 PrintF("\n\n");
4711 PrintF("gc_count=%d ", gc_count_);
4712 PrintF("mark_sweep_count=%d ", ms_count_);
4713 PrintF("mark_compact_count=%d ", mc_count_);
4714 PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
4715 PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
Ben Murdochf87a2032010-10-22 12:50:53 +01004716 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
4717 GCTracer::get_max_alive_after_gc());
Leon Clarkef7060e22010-06-03 12:02:55 +01004718 PrintF("\n\n");
4719 }
4720
Steve Blocka7e24c12009-10-30 11:49:00 +00004721 GlobalHandles::TearDown();
4722
Leon Clarkee46be812010-01-19 14:06:41 +00004723 ExternalStringTable::TearDown();
4724
Steve Blocka7e24c12009-10-30 11:49:00 +00004725 new_space_.TearDown();
4726
4727 if (old_pointer_space_ != NULL) {
4728 old_pointer_space_->TearDown();
4729 delete old_pointer_space_;
4730 old_pointer_space_ = NULL;
4731 }
4732
4733 if (old_data_space_ != NULL) {
4734 old_data_space_->TearDown();
4735 delete old_data_space_;
4736 old_data_space_ = NULL;
4737 }
4738
4739 if (code_space_ != NULL) {
4740 code_space_->TearDown();
4741 delete code_space_;
4742 code_space_ = NULL;
4743 }
4744
4745 if (map_space_ != NULL) {
4746 map_space_->TearDown();
4747 delete map_space_;
4748 map_space_ = NULL;
4749 }
4750
4751 if (cell_space_ != NULL) {
4752 cell_space_->TearDown();
4753 delete cell_space_;
4754 cell_space_ = NULL;
4755 }
4756
4757 if (lo_space_ != NULL) {
4758 lo_space_->TearDown();
4759 delete lo_space_;
4760 lo_space_ = NULL;
4761 }
4762
4763 MemoryAllocator::TearDown();
4764}
4765
4766
4767void Heap::Shrink() {
4768 // Try to shrink all paged spaces.
4769 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004770 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
4771 space->Shrink();
Steve Blocka7e24c12009-10-30 11:49:00 +00004772}
4773
4774
4775#ifdef ENABLE_HEAP_PROTECTION
4776
4777void Heap::Protect() {
4778 if (HasBeenSetup()) {
4779 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004780 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4781 space->Protect();
Steve Blocka7e24c12009-10-30 11:49:00 +00004782 }
4783}
4784
4785
4786void Heap::Unprotect() {
4787 if (HasBeenSetup()) {
4788 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004789 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4790 space->Unprotect();
Steve Blocka7e24c12009-10-30 11:49:00 +00004791 }
4792}
4793
4794#endif
4795
4796
Steve Block6ded16b2010-05-10 14:33:55 +01004797void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
4798 ASSERT(callback != NULL);
4799 GCPrologueCallbackPair pair(callback, gc_type);
4800 ASSERT(!gc_prologue_callbacks_.Contains(pair));
4801 return gc_prologue_callbacks_.Add(pair);
4802}
4803
4804
4805void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
4806 ASSERT(callback != NULL);
4807 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
4808 if (gc_prologue_callbacks_[i].callback == callback) {
4809 gc_prologue_callbacks_.Remove(i);
4810 return;
4811 }
4812 }
4813 UNREACHABLE();
4814}
4815
4816
4817void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
4818 ASSERT(callback != NULL);
4819 GCEpilogueCallbackPair pair(callback, gc_type);
4820 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
4821 return gc_epilogue_callbacks_.Add(pair);
4822}
4823
4824
4825void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
4826 ASSERT(callback != NULL);
4827 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
4828 if (gc_epilogue_callbacks_[i].callback == callback) {
4829 gc_epilogue_callbacks_.Remove(i);
4830 return;
4831 }
4832 }
4833 UNREACHABLE();
4834}
4835
4836
Steve Blocka7e24c12009-10-30 11:49:00 +00004837#ifdef DEBUG
4838
4839class PrintHandleVisitor: public ObjectVisitor {
4840 public:
4841 void VisitPointers(Object** start, Object** end) {
4842 for (Object** p = start; p < end; p++)
Ben Murdochf87a2032010-10-22 12:50:53 +01004843 PrintF(" handle %p to %p\n",
4844 reinterpret_cast<void*>(p),
4845 reinterpret_cast<void*>(*p));
Steve Blocka7e24c12009-10-30 11:49:00 +00004846 }
4847};
4848
4849void Heap::PrintHandles() {
4850 PrintF("Handles:\n");
4851 PrintHandleVisitor v;
4852 HandleScopeImplementer::Iterate(&v);
4853}
4854
4855#endif
4856
4857
4858Space* AllSpaces::next() {
4859 switch (counter_++) {
4860 case NEW_SPACE:
4861 return Heap::new_space();
4862 case OLD_POINTER_SPACE:
4863 return Heap::old_pointer_space();
4864 case OLD_DATA_SPACE:
4865 return Heap::old_data_space();
4866 case CODE_SPACE:
4867 return Heap::code_space();
4868 case MAP_SPACE:
4869 return Heap::map_space();
4870 case CELL_SPACE:
4871 return Heap::cell_space();
4872 case LO_SPACE:
4873 return Heap::lo_space();
4874 default:
4875 return NULL;
4876 }
4877}
4878
4879
4880PagedSpace* PagedSpaces::next() {
4881 switch (counter_++) {
4882 case OLD_POINTER_SPACE:
4883 return Heap::old_pointer_space();
4884 case OLD_DATA_SPACE:
4885 return Heap::old_data_space();
4886 case CODE_SPACE:
4887 return Heap::code_space();
4888 case MAP_SPACE:
4889 return Heap::map_space();
4890 case CELL_SPACE:
4891 return Heap::cell_space();
4892 default:
4893 return NULL;
4894 }
4895}
4896
4897
4898
4899OldSpace* OldSpaces::next() {
4900 switch (counter_++) {
4901 case OLD_POINTER_SPACE:
4902 return Heap::old_pointer_space();
4903 case OLD_DATA_SPACE:
4904 return Heap::old_data_space();
4905 case CODE_SPACE:
4906 return Heap::code_space();
4907 default:
4908 return NULL;
4909 }
4910}
4911
4912
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004913SpaceIterator::SpaceIterator()
4914 : current_space_(FIRST_SPACE),
4915 iterator_(NULL),
4916 size_func_(NULL) {
4917}
4918
4919
4920SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
4921 : current_space_(FIRST_SPACE),
4922 iterator_(NULL),
4923 size_func_(size_func) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004924}
4925
4926
4927SpaceIterator::~SpaceIterator() {
4928 // Delete active iterator if any.
4929 delete iterator_;
4930}
4931
4932
4933bool SpaceIterator::has_next() {
4934 // Iterate until no more spaces.
4935 return current_space_ != LAST_SPACE;
4936}
4937
4938
4939ObjectIterator* SpaceIterator::next() {
4940 if (iterator_ != NULL) {
4941 delete iterator_;
4942 iterator_ = NULL;
4943 // Move to the next space
4944 current_space_++;
4945 if (current_space_ > LAST_SPACE) {
4946 return NULL;
4947 }
4948 }
4949
4950 // Return iterator for the new current space.
4951 return CreateIterator();
4952}
4953
4954
4955// Create an iterator for the space to iterate.
4956ObjectIterator* SpaceIterator::CreateIterator() {
4957 ASSERT(iterator_ == NULL);
4958
4959 switch (current_space_) {
4960 case NEW_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004961 iterator_ = new SemiSpaceIterator(Heap::new_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004962 break;
4963 case OLD_POINTER_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004964 iterator_ = new HeapObjectIterator(Heap::old_pointer_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004965 break;
4966 case OLD_DATA_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004967 iterator_ = new HeapObjectIterator(Heap::old_data_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004968 break;
4969 case CODE_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004970 iterator_ = new HeapObjectIterator(Heap::code_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004971 break;
4972 case MAP_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004973 iterator_ = new HeapObjectIterator(Heap::map_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004974 break;
4975 case CELL_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004976 iterator_ = new HeapObjectIterator(Heap::cell_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004977 break;
4978 case LO_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004979 iterator_ = new LargeObjectIterator(Heap::lo_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004980 break;
4981 }
4982
4983  // Return the newly allocated iterator.
4984 ASSERT(iterator_ != NULL);
4985 return iterator_;
4986}
4987
4988
Ben Murdochb0fe1622011-05-05 13:52:32 +01004989class HeapObjectsFilter {
4990 public:
4991 virtual ~HeapObjectsFilter() {}
4992 virtual bool SkipObject(HeapObject* object) = 0;
4993};
4994
4995
4996class FreeListNodesFilter : public HeapObjectsFilter {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004997 public:
4998 FreeListNodesFilter() {
4999 MarkFreeListNodes();
5000 }
5001
Ben Murdochb0fe1622011-05-05 13:52:32 +01005002 bool SkipObject(HeapObject* object) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005003 if (object->IsMarked()) {
5004 object->ClearMark();
5005 return true;
5006 } else {
5007 return false;
5008 }
5009 }
5010
5011 private:
5012 void MarkFreeListNodes() {
5013 Heap::old_pointer_space()->MarkFreeListNodes();
5014 Heap::old_data_space()->MarkFreeListNodes();
5015 MarkCodeSpaceFreeListNodes();
5016 Heap::map_space()->MarkFreeListNodes();
5017 Heap::cell_space()->MarkFreeListNodes();
5018 }
5019
5020 void MarkCodeSpaceFreeListNodes() {
5021 // For code space, using FreeListNode::IsFreeListNode is OK.
5022 HeapObjectIterator iter(Heap::code_space());
5023 for (HeapObject* obj = iter.next_object();
5024 obj != NULL;
5025 obj = iter.next_object()) {
5026 if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
5027 }
5028 }
5029
5030 AssertNoAllocation no_alloc;
5031};
5032
5033
Ben Murdochb0fe1622011-05-05 13:52:32 +01005034class UnreachableObjectsFilter : public HeapObjectsFilter {
5035 public:
5036 UnreachableObjectsFilter() {
5037 MarkUnreachableObjects();
5038 }
5039
5040 bool SkipObject(HeapObject* object) {
5041 if (object->IsMarked()) {
5042 object->ClearMark();
5043 return true;
5044 } else {
5045 return false;
5046 }
5047 }
5048
5049 private:
5050 class UnmarkingVisitor : public ObjectVisitor {
5051 public:
5052 UnmarkingVisitor() : list_(10) {}
5053
5054 void VisitPointers(Object** start, Object** end) {
5055 for (Object** p = start; p < end; p++) {
5056 if (!(*p)->IsHeapObject()) continue;
5057 HeapObject* obj = HeapObject::cast(*p);
5058 if (obj->IsMarked()) {
5059 obj->ClearMark();
5060 list_.Add(obj);
5061 }
5062 }
5063 }
5064
5065 bool can_process() { return !list_.is_empty(); }
5066
5067 void ProcessNext() {
5068 HeapObject* obj = list_.RemoveLast();
5069 obj->Iterate(this);
5070 }
5071
5072 private:
5073 List<HeapObject*> list_;
5074 };
5075
5076 void MarkUnreachableObjects() {
5077 HeapIterator iterator;
5078 for (HeapObject* obj = iterator.next();
5079 obj != NULL;
5080 obj = iterator.next()) {
5081 obj->SetMark();
5082 }
5083 UnmarkingVisitor visitor;
Ben Murdochb8e0da22011-05-16 14:20:40 +01005084 Heap::IterateRoots(&visitor, VISIT_ALL);
Ben Murdochb0fe1622011-05-05 13:52:32 +01005085 while (visitor.can_process())
5086 visitor.ProcessNext();
5087 }
5088
5089 AssertNoAllocation no_alloc;
5090};
5091
5092
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005093HeapIterator::HeapIterator()
5094 : filtering_(HeapIterator::kNoFiltering),
5095 filter_(NULL) {
5096 Init();
5097}
5098
5099
Ben Murdochb0fe1622011-05-05 13:52:32 +01005100HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005101 : filtering_(filtering),
5102 filter_(NULL) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005103 Init();
5104}
5105
5106
5107HeapIterator::~HeapIterator() {
5108 Shutdown();
5109}
5110
5111
5112void HeapIterator::Init() {
5113 // Start the iteration.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005114 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
5115 new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
5116 switch (filtering_) {
5117 case kFilterFreeListNodes:
5118 filter_ = new FreeListNodesFilter;
5119 break;
5120 case kFilterUnreachable:
5121 filter_ = new UnreachableObjectsFilter;
5122 break;
5123 default:
5124 break;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005125 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005126 object_iterator_ = space_iterator_->next();
5127}
5128
5129
5130void HeapIterator::Shutdown() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005131#ifdef DEBUG
Ben Murdochb0fe1622011-05-05 13:52:32 +01005132 // Assert that in filtering mode we have iterated through all
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005133  // objects. Otherwise, the heap will be left in an inconsistent state.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005134 if (filtering_ != kNoFiltering) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005135 ASSERT(object_iterator_ == NULL);
5136 }
5137#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005138 // Make sure the last iterator is deallocated.
5139 delete space_iterator_;
5140 space_iterator_ = NULL;
5141 object_iterator_ = NULL;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005142 delete filter_;
5143 filter_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005144}
5145
5146
HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}


#ifdef DEBUG

static bool search_for_any_global;
static Object* search_target;
static bool found_target;
static List<Object*> object_stack(20);


// Tags 0, 1, and 3 are used. Use 2 for marking a visited HeapObject.
static const int kMarkTag = 2;
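// An object is marked as visited by bumping its map pointer by kMarkTag,
// so a "map" slot that no longer looks like a HeapObject identifies an
// already-visited object; UnmarkObjectRecursively undoes the offset to
// restore the original map.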

static void MarkObjectRecursively(Object** p);
class MarkObjectVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Mark all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        MarkObjectRecursively(p);
    }
  }
};

static MarkObjectVisitor mark_visitor;

static void MarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target) return;  // stop if target found
  object_stack.Add(obj);
  if ((search_for_any_global && obj->IsJSGlobalObject()) ||
      (!search_for_any_global && (obj == search_target))) {
    found_target = true;
    return;
  }

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

  MarkObjectRecursively(&map);

  obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
                   &mark_visitor);

  if (!found_target)  // don't pop if found the target
    object_stack.RemoveLast();
}


static void UnmarkObjectRecursively(Object** p);
class UnmarkObjectVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Unmark all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        UnmarkObjectRecursively(p);
    }
  }
};

static UnmarkObjectVisitor unmark_visitor;

static void UnmarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   &unmark_visitor);
}


static void MarkRootObjectRecursively(Object** root) {
  if (search_for_any_global) {
    ASSERT(search_target == NULL);
  } else {
    ASSERT(search_target->IsHeapObject());
  }
  found_target = false;
  object_stack.Clear();

  MarkObjectRecursively(root);
  UnmarkObjectRecursively(root);

  if (found_target) {
    PrintF("=====================================\n");
    PrintF("==== Path to object ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack.is_empty());
    for (int i = 0; i < object_stack.length(); i++) {
      if (i > 0) PrintF("\n |\n |\n V\n\n");
      Object* obj = object_stack[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}


// Helper class for visiting HeapObjects recursively.
class MarkRootVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        MarkRootObjectRecursively(p);
    }
  }
};


// Triggers a depth-first traversal of reachable objects from the roots
// and finds and prints a path to a specific heap object.
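// Intended to be invoked manually, e.g. from a debugger session on a debug
// build.  A minimal sketch (`suspect` is a hypothetical Object* of interest):
//
//   Heap::TracePathToObject(suspect);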
void Heap::TracePathToObject(Object* target) {
  search_target = target;
  search_for_any_global = false;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from the roots
// and finds and prints a path to any global object. Useful for
// determining the source of leaked global objects.
void Heap::TracePathToGlobal() {
  search_target = NULL;
  search_for_any_global = true;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}
#endif


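// Sums, over all old spaces, the bytes currently unusable for allocation:
// wasted bytes plus the bytes sitting on the spaces' free lists.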
static intptr_t CountTotalHolesSize() {
  intptr_t holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->AvailableFree();
  }
  return holes_size;
}


GCTracer::GCTracer()
    : start_time_(0.0),
      start_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      is_compacting_(false),
      marked_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = MarkCompactCollector::HasCompacted();
  previous_marked_count_ = MarkCompactCollector::previous_marked_count();
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = Heap::SizeOfObjects();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();

  allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;

  if (last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
  }
}


GCTracer::~GCTracer() {
  // Print a single trace line iff a GC tracing flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (last_gc_end_timestamp_ == 0);

  alive_after_last_gc_ = Heap::SizeOfObjects();
  last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    max_gc_pause_ = Max(max_gc_pause_, time);
    max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
    if (!first_gc) {
      min_in_mutator_ = Min(min_in_mutator_,
                            static_cast<int>(spent_in_mutator_));
    }
  }

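  // With --trace-gc a single human-readable line is printed per collection,
  // e.g. (illustrative values only): "Scavenge 12.3 -> 8.1 MB, 5 ms.".
  // With --trace-gc-nvp the same data is emitted as name=value pairs, which
  // is easier to post-process with scripts.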
  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    PrintF("%s %.1f -> %.1f MB, ",
           CollectorString(),
           static_cast<double>(start_size_) / MB,
           SizeOfHeapObjects());

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%d ms.\n", time);
  } else {
    PrintF("pause=%d ", time);
    PrintF("mutator=%d ",
           static_cast<int>(spent_in_mutator_));

    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
    PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", Heap::SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);

    PrintF("\n");
  }

#if defined(ENABLE_LOGGING_AND_PROFILING)
  Heap::PrintShortHeapStatistics();
#endif
}


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return MarkCompactCollector::HasCompacted() ? "Mark-compact"
                                                  : "Mark-sweep";
  }
  return "Unknown GC";
}


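// KeyedLookupCache is a small direct-mapped cache from (map, symbol name)
// pairs to in-object field offsets, consulted before doing a full property
// lookup.  An illustrative sketch (the variable names are hypothetical):
//
//   int offset = KeyedLookupCache::Lookup(receiver_map, key_name);
//   if (offset == -1) {
//     // Full lookup, then remember the result for next time.
//     KeyedLookupCache::Update(receiver_map, key_name, computed_offset);
//   }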
int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = Hash(map, name);
  Key& key = keys_[index];
  if ((key.map == map) && key.name->Equals(name)) {
    return field_offsets_[index];
  }
  return -1;
}


void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (Heap::LookupSymbolIfExists(name, &symbol)) {
    int index = Hash(map, symbol);
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];


int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}


DescriptorLookupCache::Key
DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];

int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];


#ifdef DEBUG
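// Debug-only helper behind the --gc-greedy flag: it forces a new-space
// collection at the call site unless the bootstrapper is active or
// allocation failure is deliberately disallowed at the moment.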
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (Bootstrapper::IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


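// The transcendental cache memoizes results of expensive math functions,
// one cache per function type.  Each entry stores the two 32-bit words of
// the input and the cached result; the constructor fills every slot with a
// bit pattern the FPU cannot produce for a real input, marking it empty.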
TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
    : type_(t) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}


TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


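// Drops entries that have been cleared (overwritten with the null value) and
// migrates strings that have left new space onto the old-space list, so each
// list only refers to strings that actually live in the corresponding space.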
void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
    if (Heap::InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
    ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  Verify();
}


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


List<Object*> ExternalStringTable::new_space_strings_;
List<Object*> ExternalStringTable::old_space_strings_;

} }  // namespace v8::internal