// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "scanner.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif


namespace v8 {
namespace internal {


String* Heap::hidden_symbol_;
Object* Heap::roots_[Heap::kRootListLength];
Object* Heap::global_contexts_list_;


NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;

static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;

intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;

int Heap::old_gen_exhausted_ = false;

int Heap::amount_of_external_allocated_memory_ = 0;
int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;

// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
int Heap::max_semispace_size_ = 2*MB;
intptr_t Heap::max_old_generation_size_ = 192*MB;
int Heap::initial_semispace_size_ = 128*KB;
intptr_t Heap::code_range_size_ = 0;
intptr_t Heap::max_executable_size_ = max_old_generation_size_;
#elif defined(V8_TARGET_ARCH_X64)
int Heap::max_semispace_size_ = 16*MB;
intptr_t Heap::max_old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
intptr_t Heap::code_range_size_ = 512*MB;
intptr_t Heap::max_executable_size_ = 256*MB;
#else
int Heap::max_semispace_size_ = 8*MB;
intptr_t Heap::max_old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
intptr_t Heap::code_range_size_ = 0;
intptr_t Heap::max_executable_size_ = 128*MB;
#endif

// The snapshot semispace size will be the default semispace size if
// snapshotting is used and will be the requested semispace size as
// set up by ConfigureHeap otherwise.
int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;

List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;

GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;
HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.

// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
int Heap::survived_since_last_expansion_ = 0;
intptr_t Heap::external_allocation_limit_ = 0;

Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

int Heap::mc_count_ = 0;
int Heap::ms_count_ = 0;
int Heap::gc_count_ = 0;

GCTracer* Heap::tracer_ = NULL;

int Heap::unflattened_strings_length_ = 0;

int Heap::always_allocate_scope_depth_ = 0;
int Heap::linear_allocation_scope_depth_ = 0;
int Heap::contexts_disposed_ = 0;

int Heap::young_survivors_after_last_gc_ = 0;
int Heap::high_survival_rate_period_length_ = 0;
double Heap::survival_rate_ = 0;
Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;

#ifdef DEBUG
bool Heap::allocation_allowed_ = true;

int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif  // DEBUG

intptr_t GCTracer::alive_after_last_gc_ = 0;
double GCTracer::last_gc_end_timestamp_ = 0.0;
int GCTracer::max_gc_pause_ = 0;
intptr_t GCTracer::max_alive_after_gc_ = 0;
int GCTracer::min_in_mutator_ = kMaxInt;

intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}

intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return MemoryAllocator::SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!Heap::InNewSpace(object));  // Code only works for old objects.
  ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}

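// Variant of the above used while map pointers are encoded during
// mark-compact: the first word of a dead region may instead hold a
// free-space marker (kSingleFreeEncoding / kMultiFreeEncoding), in which
// case the size of the free block is returned.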
int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!Heap::InNewSpace(object));  // Code only works for old objects.
  ASSERT(MarkCompactCollector::are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(Heap::map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    Counters::gc_compactor_caused_by_request.Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    Counters::gc_compactor_caused_by_promoted_data.Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}

#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         MemoryAllocator::Size(),
         MemoryAllocator::Available());
  PrintF("New space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  TranscendentalCache::Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->Size();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) GlobalHandles::Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  Counters::alive_after_last_gc.Set(static_cast<int>(SizeOfObjects()));

  Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
  Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  Debug::AfterGarbageCollection();
#endif
}

void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  MarkCompactCollector::SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(true);

  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC.  Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callbacks invocations.
  // Therefore stop recollecting after several attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  MarkCompactCollector::SetForceCompaction(false);
}


bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? &Counters::gc_scavenger
        : &Counters::gc_compactor;
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
  if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
#endif

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer;
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  SymbolTableVerifier() { }
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  Heap::symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}

void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation.  This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (Bootstrapper::IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (Bootstrapper::IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

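// Runs a single collection with the selected collector. The returned flag
// indicates whether another GC is expected to free additional memory; it is
// produced by GlobalHandles::PostGarbageCollectionProcessing and is only
// computed after a mark-compact collection.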
bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that mutator is either building or modifying
      // a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  Counters::objs_since_last_young.Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        GlobalHandles::PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}

void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(ResourceEvent("markcompact", "begin"));

  MarkCompactCollector::Prepare(tracer);

  bool is_compacting = MarkCompactCollector::IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  MarkCompactCollector::CollectGarbage();

  LOG(ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  Counters::objs_since_last_full.Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  KeyedLookupCache::Clear();
  ContextSlotCache::Clear();
  DescriptorLookupCache::Clear();

  CompilationCache::MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};

// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
 public:
  void Initialize(Address start_address) {
    front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
  }

  bool is_empty() { return front_ <= rear_; }

  void insert(HeapObject* target, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(target);
    *(--rear_) = size;
    // Assert no overflow into live objects.
    ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
  }

  void remove(HeapObject** target, int* size) {
    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    ASSERT(front_ >= rear_);
  }

 private:
  // The front of the queue is higher in memory than the rear.
  intptr_t* front_;
  intptr_t* rear_;
};


// Shared state read by the scavenge collector and set by ScavengeObject.
static PromotionQueue promotion_queue;


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(Heap::code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(Heap::old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif

void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  Page::FlipMeaningOfInvalidatedWatermarkFlag();
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark. This guarantees that dirty regions iteration will use a
  // correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  DescriptorLookupCache::Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue.Initialize(new_space_.ToSpaceHigh());

  ScavengeVisitor scavenge_visitor;
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.  By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge object reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  ASSERT(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  ExternalStringTable::Verify();

  if (ExternalStringTable::new_space_strings_.is_empty()) return;

  Object** start = &ExternalStringTable::new_space_strings_[0];
  Object** end = start + ExternalStringTable::new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(Heap::InFromSpace(*p));
    String* target = updater_func(p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (Heap::InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      ExternalStringTable::AddOldString(target);
    }
  }

  ASSERT(last <= end);
  ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
}


void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (!candidate->IsUndefined()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head->IsUndefined()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      tail = candidate_context;
    }
    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  Heap::global_contexts_list_ = head;
}

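// Static visitor used by DoScavenge to scan the bodies of objects freshly
// copied into to-space; any new-space pointer found there is evacuated in
// turn via Heap::ScavengeObject.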
class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue.remove(&target, &size);

      // Promoted object might be already partially visited
      // during dirty regions iteration. Thus we search specifically
      // for pointers to from semispace instead of looking for pointers
      // to new space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  return new_space_front;
}

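// Table-driven visitor that evacuates objects out of from-space during a
// scavenge. Initialize() registers a specialized evacuation routine for each
// visitor id; Scavenge() dispatches on an object's map to the matching
// routine.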
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitGlobalContext,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        VisitSpecialized<Context::kSize>);

    typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;

    table_.Register(kVisitConsString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        VisitSpecialized<ConsString::kSize>);

    table_.Register(kVisitSharedFunctionInfo,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        VisitSpecialized<SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSFunction,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        VisitSpecialized<JSFunction::kSize>);

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }


  static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) {
    table_.GetVisitor(map)(map, slot, obj);
  }


 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
  enum SizeRestriction { SMALL, UNKNOWN_SIZE };

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  static void RecordCopiedObject(HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
    should_record = should_record || FLAG_log_gc;
#endif
    if (should_record) {
      if (Heap::new_space()->Contains(obj)) {
        Heap::new_space()->RecordAllocation(obj);
      } else {
        Heap::new_space()->RecordPromotion(obj);
      }
    }
  }
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)

  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the source
  // object.  Returns the target object.
  INLINE(static HeapObject* MigrateObject(HeapObject* source,
                                          HeapObject* target,
                                          int size)) {
    // Copy the content of source to target.
    Heap::CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
    // Update NewSpace stats if necessary.
    RecordCopiedObject(target);
#endif
    HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
#if defined(ENABLE_LOGGING_AND_PROFILING)
    if (Logger::is_logging() || CpuProfiler::is_profiling()) {
      if (target->IsJSFunction()) {
        PROFILE(FunctionMoveEvent(source->address(), target->address()));
        PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target)));
      }
    }
#endif
    return target;
  }

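  // Copies an object out of from-space. Objects past the promotion threshold
  // are allocated in old space when possible (oversized ones in the large
  // object space) and, if they contain pointers, queued on the promotion
  // queue for later rescanning; all remaining objects are copied within new
  // space.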
  template<ObjectContents object_contents, SizeRestriction size_restriction>
  static inline void EvacuateObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* object,
                                    int object_size) {
    ASSERT((size_restriction != SMALL) ||
           (object_size <= Page::kMaxHeapObjectSize));
    ASSERT(object->Size() == object_size);

    if (Heap::ShouldBePromoted(object->address(), object_size)) {
      MaybeObject* maybe_result;

      if ((size_restriction != SMALL) &&
          (object_size > Page::kMaxHeapObjectSize)) {
        maybe_result = Heap::lo_space()->AllocateRawFixedArray(object_size);
      } else {
        if (object_contents == DATA_OBJECT) {
          maybe_result = Heap::old_data_space()->AllocateRaw(object_size);
        } else {
          maybe_result = Heap::old_pointer_space()->AllocateRaw(object_size);
        }
      }

      Object* result = NULL;  // Initialization to please compiler.
      if (maybe_result->ToObject(&result)) {
        HeapObject* target = HeapObject::cast(result);
        *slot = MigrateObject(object, target, object_size);

        if (object_contents == POINTER_OBJECT) {
          promotion_queue.insert(target, object_size);
        }

        Heap::tracer()->increment_promoted_objects_size(object_size);
        return;
      }
    }
    Object* result =
        Heap::new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
    *slot = MigrateObject(object, HeapObject::cast(result), object_size);
    return;
  }


  static inline void EvacuateFixedArray(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
                                                 slot,
                                                 object,
                                                 object_size);
  }


  static inline void EvacuateByteArray(Map* map,
                                       HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqAsciiString(Map* map,
                                            HeapObject** slot,
                                            HeapObject* object) {
    int object_size = SeqAsciiString::cast(object)->
        SeqAsciiStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqTwoByteString(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)->
        SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline bool IsShortcutCandidate(int type) {
    return ((type & kShortcutTypeMask) == kShortcutTypeTag);
  }

1377 static inline void EvacuateShortcutCandidate(Map* map,
1378 HeapObject** slot,
1379 HeapObject* object) {
1380 ASSERT(IsShortcutCandidate(map->instance_type()));
1381
1382 if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
1383 HeapObject* first =
1384 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1385
1386 *slot = first;
1387
1388 if (!Heap::InNewSpace(first)) {
1389 object->set_map_word(MapWord::FromForwardingAddress(first));
1390 return;
1391 }
1392
1393 MapWord first_word = first->map_word();
1394 if (first_word.IsForwardingAddress()) {
1395 HeapObject* target = first_word.ToForwardingAddress();
1396
1397 *slot = target;
1398 object->set_map_word(MapWord::FromForwardingAddress(target));
1399 return;
1400 }
1401
1402 Scavenge(first->map(), slot, first);
1403 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1404 return;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001405 }
Iain Merrick75681382010-08-19 15:07:18 +01001406
1407 int object_size = ConsString::kSize;
1408 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001409 }
1410
Iain Merrick75681382010-08-19 15:07:18 +01001411 template<ObjectContents object_contents>
1412 class ObjectEvacuationStrategy {
1413 public:
1414 template<int object_size>
1415 static inline void VisitSpecialized(Map* map,
1416 HeapObject** slot,
1417 HeapObject* object) {
1418 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1419 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001420
Iain Merrick75681382010-08-19 15:07:18 +01001421 static inline void Visit(Map* map,
1422 HeapObject** slot,
1423 HeapObject* object) {
1424 int object_size = map->instance_size();
1425 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1426 }
1427 };
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001428
Iain Merrick75681382010-08-19 15:07:18 +01001429 typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001430
Iain Merrick75681382010-08-19 15:07:18 +01001431 static VisitorDispatchTable<Callback> table_;
1432};
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001433
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001434
Iain Merrick75681382010-08-19 15:07:18 +01001435VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001436
1437
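// Overview (an illustrative sketch, not part of the implementation):
// scavenging dispatches on the visitor id recorded in the object's map, e.g.
//
//   HeapObject* object = ...;   // hypothetical object located in from space
//   HeapObject** slot = ...;    // hypothetical slot holding the reference
//   ScavengingVisitor::Scavenge(object->map(), slot, object);
//
// This either copies the object within new space or promotes it into old
// space via EvacuateObject, updating *slot and leaving a forwarding address
// in the source object's map word.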
1438void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1439 ASSERT(InFromSpace(object));
1440 MapWord first_word = object->map_word();
1441 ASSERT(!first_word.IsForwardingAddress());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001442 Map* map = first_word.ToMap();
Iain Merrick75681382010-08-19 15:07:18 +01001443 ScavengingVisitor::Scavenge(map, p, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001444}
1445
1446
1447void Heap::ScavengePointer(HeapObject** p) {
1448 ScavengeObject(p, *p);
1449}
1450
1451
John Reck59135872010-11-02 12:39:01 -07001452MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1453 int instance_size) {
1454 Object* result;
1455 { MaybeObject* maybe_result = AllocateRawMap();
1456 if (!maybe_result->ToObject(&result)) return maybe_result;
1457 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001458
1459 // Map::cast cannot be used due to uninitialized map field.
1460 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1461 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1462 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001463 reinterpret_cast<Map*>(result)->
Iain Merrick75681382010-08-19 15:07:18 +01001464 set_visitor_id(
1465 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001466 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001467 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001468 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001469 reinterpret_cast<Map*>(result)->set_bit_field(0);
1470 reinterpret_cast<Map*>(result)->set_bit_field2(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001471 return result;
1472}
1473
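// Usage sketch (illustrative only; this is the MaybeObject protocol already
// used throughout this file, not additional API):
//
//   Object* obj;
//   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
//     if (!maybe_obj->ToObject(&obj)) return false;
//   }
//   // Map::cast cannot be used yet because the map field is uninitialized.
//   Map* new_meta_map = reinterpret_cast<Map*>(obj);
//
// CreateInitialMaps below uses exactly this pattern and later patches the
// partial maps with descriptors, code caches and prototypes.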
1474
John Reck59135872010-11-02 12:39:01 -07001475MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1476 Object* result;
1477 { MaybeObject* maybe_result = AllocateRawMap();
1478 if (!maybe_result->ToObject(&result)) return maybe_result;
1479 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001480
1481 Map* map = reinterpret_cast<Map*>(result);
1482 map->set_map(meta_map());
1483 map->set_instance_type(instance_type);
Iain Merrick75681382010-08-19 15:07:18 +01001484 map->set_visitor_id(
1485 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001486 map->set_prototype(null_value());
1487 map->set_constructor(null_value());
1488 map->set_instance_size(instance_size);
1489 map->set_inobject_properties(0);
1490 map->set_pre_allocated_property_fields(0);
1491 map->set_instance_descriptors(empty_descriptor_array());
1492 map->set_code_cache(empty_fixed_array());
1493 map->set_unused_property_fields(0);
1494 map->set_bit_field(0);
Steve Block8defd9f2010-07-08 12:39:36 +01001495 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
Leon Clarkee46be812010-01-19 14:06:41 +00001496
1497 // If the map object is aligned fill the padding area with Smi 0 objects.
1498 if (Map::kPadStart < Map::kSize) {
1499 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1500 0,
1501 Map::kSize - Map::kPadStart);
1502 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001503 return map;
1504}
1505
1506
John Reck59135872010-11-02 12:39:01 -07001507MaybeObject* Heap::AllocateCodeCache() {
1508 Object* result;
1509 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1510 if (!maybe_result->ToObject(&result)) return maybe_result;
1511 }
Steve Block6ded16b2010-05-10 14:33:55 +01001512 CodeCache* code_cache = CodeCache::cast(result);
1513 code_cache->set_default_cache(empty_fixed_array());
1514 code_cache->set_normal_type_cache(undefined_value());
1515 return code_cache;
1516}
1517
1518
Steve Blocka7e24c12009-10-30 11:49:00 +00001519const Heap::StringTypeTable Heap::string_type_table[] = {
1520#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1521 {type, size, k##camel_name##MapRootIndex},
1522 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1523#undef STRING_TYPE_ELEMENT
1524};
1525
1526
1527const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1528#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1529 {contents, k##name##RootIndex},
1530 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1531#undef CONSTANT_SYMBOL_ELEMENT
1532};
1533
1534
1535const Heap::StructTable Heap::struct_table[] = {
1536#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1537 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1538 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1539#undef STRUCT_TABLE_ELEMENT
1540};
1541
1542
1543bool Heap::CreateInitialMaps() {
John Reck59135872010-11-02 12:39:01 -07001544 Object* obj;
1545 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1546 if (!maybe_obj->ToObject(&obj)) return false;
1547 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001548 // Map::cast cannot be used due to uninitialized map field.
1549 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1550 set_meta_map(new_meta_map);
1551 new_meta_map->set_map(new_meta_map);
1552
John Reck59135872010-11-02 12:39:01 -07001553 { MaybeObject* maybe_obj =
1554 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1555 if (!maybe_obj->ToObject(&obj)) return false;
1556 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001557 set_fixed_array_map(Map::cast(obj));
1558
John Reck59135872010-11-02 12:39:01 -07001559 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1560 if (!maybe_obj->ToObject(&obj)) return false;
1561 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001562 set_oddball_map(Map::cast(obj));
1563
Steve Block6ded16b2010-05-10 14:33:55 +01001564 // Allocate the empty array.
John Reck59135872010-11-02 12:39:01 -07001565 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1566 if (!maybe_obj->ToObject(&obj)) return false;
1567 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001568 set_empty_fixed_array(FixedArray::cast(obj));
1569
John Reck59135872010-11-02 12:39:01 -07001570 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1571 if (!maybe_obj->ToObject(&obj)) return false;
1572 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001573 set_null_value(obj);
1574
1575 // Allocate the empty descriptor array.
John Reck59135872010-11-02 12:39:01 -07001576 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1577 if (!maybe_obj->ToObject(&obj)) return false;
1578 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001579 set_empty_descriptor_array(DescriptorArray::cast(obj));
1580
1581 // Fix the instance_descriptors for the existing maps.
1582 meta_map()->set_instance_descriptors(empty_descriptor_array());
1583 meta_map()->set_code_cache(empty_fixed_array());
1584
1585 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1586 fixed_array_map()->set_code_cache(empty_fixed_array());
1587
1588 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1589 oddball_map()->set_code_cache(empty_fixed_array());
1590
1591 // Fix prototype object for existing maps.
1592 meta_map()->set_prototype(null_value());
1593 meta_map()->set_constructor(null_value());
1594
1595 fixed_array_map()->set_prototype(null_value());
1596 fixed_array_map()->set_constructor(null_value());
1597
1598 oddball_map()->set_prototype(null_value());
1599 oddball_map()->set_constructor(null_value());
1600
John Reck59135872010-11-02 12:39:01 -07001601 { MaybeObject* maybe_obj =
1602 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1603 if (!maybe_obj->ToObject(&obj)) return false;
1604 }
Iain Merrick75681382010-08-19 15:07:18 +01001605 set_fixed_cow_array_map(Map::cast(obj));
1606 ASSERT(fixed_array_map() != fixed_cow_array_map());
1607
John Reck59135872010-11-02 12:39:01 -07001608 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1609 if (!maybe_obj->ToObject(&obj)) return false;
1610 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001611 set_heap_number_map(Map::cast(obj));
1612
John Reck59135872010-11-02 12:39:01 -07001613 { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1614 if (!maybe_obj->ToObject(&obj)) return false;
1615 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001616 set_proxy_map(Map::cast(obj));
1617
1618 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1619 const StringTypeTable& entry = string_type_table[i];
John Reck59135872010-11-02 12:39:01 -07001620 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1621 if (!maybe_obj->ToObject(&obj)) return false;
1622 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001623 roots_[entry.index] = Map::cast(obj);
1624 }
1625
John Reck59135872010-11-02 12:39:01 -07001626 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1627 if (!maybe_obj->ToObject(&obj)) return false;
1628 }
Steve Blockd0582a62009-12-15 09:54:21 +00001629 set_undetectable_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001630 Map::cast(obj)->set_is_undetectable();
1631
John Reck59135872010-11-02 12:39:01 -07001632 { MaybeObject* maybe_obj =
1633 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1634 if (!maybe_obj->ToObject(&obj)) return false;
1635 }
Steve Blockd0582a62009-12-15 09:54:21 +00001636 set_undetectable_ascii_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001637 Map::cast(obj)->set_is_undetectable();
1638
John Reck59135872010-11-02 12:39:01 -07001639 { MaybeObject* maybe_obj =
1640 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1641 if (!maybe_obj->ToObject(&obj)) return false;
1642 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001643 set_byte_array_map(Map::cast(obj));
1644
John Reck59135872010-11-02 12:39:01 -07001645 { MaybeObject* maybe_obj =
1646 AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
1647 if (!maybe_obj->ToObject(&obj)) return false;
1648 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001649 set_pixel_array_map(Map::cast(obj));
1650
John Reck59135872010-11-02 12:39:01 -07001651 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1652 ExternalArray::kAlignedSize);
1653 if (!maybe_obj->ToObject(&obj)) return false;
1654 }
Steve Block3ce2e202009-11-05 08:53:23 +00001655 set_external_byte_array_map(Map::cast(obj));
1656
John Reck59135872010-11-02 12:39:01 -07001657 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1658 ExternalArray::kAlignedSize);
1659 if (!maybe_obj->ToObject(&obj)) return false;
1660 }
Steve Block3ce2e202009-11-05 08:53:23 +00001661 set_external_unsigned_byte_array_map(Map::cast(obj));
1662
John Reck59135872010-11-02 12:39:01 -07001663 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1664 ExternalArray::kAlignedSize);
1665 if (!maybe_obj->ToObject(&obj)) return false;
1666 }
Steve Block3ce2e202009-11-05 08:53:23 +00001667 set_external_short_array_map(Map::cast(obj));
1668
John Reck59135872010-11-02 12:39:01 -07001669 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1670 ExternalArray::kAlignedSize);
1671 if (!maybe_obj->ToObject(&obj)) return false;
1672 }
Steve Block3ce2e202009-11-05 08:53:23 +00001673 set_external_unsigned_short_array_map(Map::cast(obj));
1674
John Reck59135872010-11-02 12:39:01 -07001675 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1676 ExternalArray::kAlignedSize);
1677 if (!maybe_obj->ToObject(&obj)) return false;
1678 }
Steve Block3ce2e202009-11-05 08:53:23 +00001679 set_external_int_array_map(Map::cast(obj));
1680
John Reck59135872010-11-02 12:39:01 -07001681 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1682 ExternalArray::kAlignedSize);
1683 if (!maybe_obj->ToObject(&obj)) return false;
1684 }
Steve Block3ce2e202009-11-05 08:53:23 +00001685 set_external_unsigned_int_array_map(Map::cast(obj));
1686
John Reck59135872010-11-02 12:39:01 -07001687 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1688 ExternalArray::kAlignedSize);
1689 if (!maybe_obj->ToObject(&obj)) return false;
1690 }
Steve Block3ce2e202009-11-05 08:53:23 +00001691 set_external_float_array_map(Map::cast(obj));
1692
John Reck59135872010-11-02 12:39:01 -07001693 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1694 if (!maybe_obj->ToObject(&obj)) return false;
1695 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001696 set_code_map(Map::cast(obj));
1697
John Reck59135872010-11-02 12:39:01 -07001698 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1699 JSGlobalPropertyCell::kSize);
1700 if (!maybe_obj->ToObject(&obj)) return false;
1701 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001702 set_global_property_cell_map(Map::cast(obj));
1703
John Reck59135872010-11-02 12:39:01 -07001704 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
1705 if (!maybe_obj->ToObject(&obj)) return false;
1706 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001707 set_one_pointer_filler_map(Map::cast(obj));
1708
John Reck59135872010-11-02 12:39:01 -07001709 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1710 if (!maybe_obj->ToObject(&obj)) return false;
1711 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001712 set_two_pointer_filler_map(Map::cast(obj));
1713
1714 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1715 const StructTable& entry = struct_table[i];
John Reck59135872010-11-02 12:39:01 -07001716 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1717 if (!maybe_obj->ToObject(&obj)) return false;
1718 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001719 roots_[entry.index] = Map::cast(obj);
1720 }
1721
John Reck59135872010-11-02 12:39:01 -07001722 { MaybeObject* maybe_obj =
1723 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1724 if (!maybe_obj->ToObject(&obj)) return false;
1725 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001726 set_hash_table_map(Map::cast(obj));
1727
John Reck59135872010-11-02 12:39:01 -07001728 { MaybeObject* maybe_obj =
1729 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1730 if (!maybe_obj->ToObject(&obj)) return false;
1731 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001732 set_context_map(Map::cast(obj));
1733
John Reck59135872010-11-02 12:39:01 -07001734 { MaybeObject* maybe_obj =
1735 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1736 if (!maybe_obj->ToObject(&obj)) return false;
1737 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001738 set_catch_context_map(Map::cast(obj));
1739
John Reck59135872010-11-02 12:39:01 -07001740 { MaybeObject* maybe_obj =
1741 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1742 if (!maybe_obj->ToObject(&obj)) return false;
1743 }
Ben Murdochf87a2032010-10-22 12:50:53 +01001744 Map* global_context_map = Map::cast(obj);
1745 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
1746 set_global_context_map(global_context_map);
Steve Blocka7e24c12009-10-30 11:49:00 +00001747
John Reck59135872010-11-02 12:39:01 -07001748 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1749 SharedFunctionInfo::kAlignedSize);
1750 if (!maybe_obj->ToObject(&obj)) return false;
1751 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001752 set_shared_function_info_map(Map::cast(obj));
1753
1754 ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
1755 return true;
1756}
1757
1758
John Reck59135872010-11-02 12:39:01 -07001759MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001760 // Statically ensure that it is safe to allocate heap numbers in paged
1761 // spaces.
1762 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1763 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1764
John Reck59135872010-11-02 12:39:01 -07001765 Object* result;
1766 { MaybeObject* maybe_result =
1767 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1768 if (!maybe_result->ToObject(&result)) return maybe_result;
1769 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001770
1771 HeapObject::cast(result)->set_map(heap_number_map());
1772 HeapNumber::cast(result)->set_value(value);
1773 return result;
1774}
1775
1776
John Reck59135872010-11-02 12:39:01 -07001777MaybeObject* Heap::AllocateHeapNumber(double value) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001778 // Use general version, if we're forced to always allocate.
1779 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1780
1781 // This version of AllocateHeapNumber is optimized for
1782 // allocation in new space.
1783 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1784 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
John Reck59135872010-11-02 12:39:01 -07001785 Object* result;
1786 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
1787 if (!maybe_result->ToObject(&result)) return maybe_result;
1788 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001789 HeapObject::cast(result)->set_map(heap_number_map());
1790 HeapNumber::cast(result)->set_value(value);
1791 return result;
1792}
1793
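// Example use (an illustrative sketch; allocation failures propagate in the
// usual MaybeObject style):
//
//   Object* number;
//   { MaybeObject* maybe_number = Heap::AllocateHeapNumber(0.5);
//     if (!maybe_number->ToObject(&number)) return maybe_number;
//   }
//   double value = HeapNumber::cast(number)->value();  // 0.5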
1794
John Reck59135872010-11-02 12:39:01 -07001795MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1796 Object* result;
1797 { MaybeObject* maybe_result = AllocateRawCell();
1798 if (!maybe_result->ToObject(&result)) return maybe_result;
1799 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001800 HeapObject::cast(result)->set_map(global_property_cell_map());
1801 JSGlobalPropertyCell::cast(result)->set_value(value);
1802 return result;
1803}
1804
1805
John Reck59135872010-11-02 12:39:01 -07001806MaybeObject* Heap::CreateOddball(const char* to_string,
1807 Object* to_number) {
1808 Object* result;
1809 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
1810 if (!maybe_result->ToObject(&result)) return maybe_result;
1811 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001812 return Oddball::cast(result)->Initialize(to_string, to_number);
1813}
1814
1815
1816bool Heap::CreateApiObjects() {
1817 Object* obj;
1818
John Reck59135872010-11-02 12:39:01 -07001819 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1820 if (!maybe_obj->ToObject(&obj)) return false;
1821 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001822 set_neander_map(Map::cast(obj));
1823
John Reck59135872010-11-02 12:39:01 -07001824 { MaybeObject* maybe_obj = Heap::AllocateJSObjectFromMap(neander_map());
1825 if (!maybe_obj->ToObject(&obj)) return false;
1826 }
1827 Object* elements;
1828 { MaybeObject* maybe_elements = AllocateFixedArray(2);
1829 if (!maybe_elements->ToObject(&elements)) return false;
1830 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001831 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1832 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1833 set_message_listeners(JSObject::cast(obj));
1834
1835 return true;
1836}
1837
1838
1839void Heap::CreateCEntryStub() {
1840 CEntryStub stub(1);
1841 set_c_entry_code(*stub.GetCode());
1842}
1843
1844
Steve Block6ded16b2010-05-10 14:33:55 +01001845#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001846void Heap::CreateRegExpCEntryStub() {
1847 RegExpCEntryStub stub;
1848 set_re_c_entry_code(*stub.GetCode());
1849}
1850#endif
1851
1852
Steve Blocka7e24c12009-10-30 11:49:00 +00001853void Heap::CreateJSEntryStub() {
1854 JSEntryStub stub;
1855 set_js_entry_code(*stub.GetCode());
1856}
1857
1858
1859void Heap::CreateJSConstructEntryStub() {
1860 JSConstructEntryStub stub;
1861 set_js_construct_entry_code(*stub.GetCode());
1862}
1863
1864
1865void Heap::CreateFixedStubs() {
1866 // Here we create roots for fixed stubs. They are needed at GC
1867 // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for a dictionary lookup in the
1869 // stub cache for these stubs.
1870 HandleScope scope;
  // gcc-4.4 has a problem generating correct code for the following snippet:
1872 // { CEntryStub stub;
1873 // c_entry_code_ = *stub.GetCode();
1874 // }
Leon Clarke4515c472010-02-03 11:58:03 +00001875 // { DebuggerStatementStub stub;
1876 // debugger_statement_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001877 // }
1878 // To workaround the problem, make separate functions without inlining.
1879 Heap::CreateCEntryStub();
Steve Blocka7e24c12009-10-30 11:49:00 +00001880 Heap::CreateJSEntryStub();
1881 Heap::CreateJSConstructEntryStub();
Steve Block6ded16b2010-05-10 14:33:55 +01001882#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001883 Heap::CreateRegExpCEntryStub();
1884#endif
1885}
1886
1887
1888bool Heap::CreateInitialObjects() {
1889 Object* obj;
1890
1891 // The -0 value must be set before NumberFromDouble works.
John Reck59135872010-11-02 12:39:01 -07001892 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
1893 if (!maybe_obj->ToObject(&obj)) return false;
1894 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001895 set_minus_zero_value(obj);
1896 ASSERT(signbit(minus_zero_value()->Number()) != 0);
1897
John Reck59135872010-11-02 12:39:01 -07001898 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
1899 if (!maybe_obj->ToObject(&obj)) return false;
1900 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001901 set_nan_value(obj);
1902
John Reck59135872010-11-02 12:39:01 -07001903 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1904 if (!maybe_obj->ToObject(&obj)) return false;
1905 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001906 set_undefined_value(obj);
1907 ASSERT(!InNewSpace(undefined_value()));
1908
1909 // Allocate initial symbol table.
John Reck59135872010-11-02 12:39:01 -07001910 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
1911 if (!maybe_obj->ToObject(&obj)) return false;
1912 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001913 // Don't use set_symbol_table() due to asserts.
1914 roots_[kSymbolTableRootIndex] = obj;
1915
  // Assign the print strings for oddballs after creating the symbol table.
John Reck59135872010-11-02 12:39:01 -07001917 Object* symbol;
1918 { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
1919 if (!maybe_symbol->ToObject(&symbol)) return false;
1920 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001921 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
1922 Oddball::cast(undefined_value())->set_to_number(nan_value());
1923
Steve Blocka7e24c12009-10-30 11:49:00 +00001924 // Allocate the null_value
John Reck59135872010-11-02 12:39:01 -07001925 { MaybeObject* maybe_obj =
1926 Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
1927 if (!maybe_obj->ToObject(&obj)) return false;
1928 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001929
John Reck59135872010-11-02 12:39:01 -07001930 { MaybeObject* maybe_obj = CreateOddball("true", Smi::FromInt(1));
1931 if (!maybe_obj->ToObject(&obj)) return false;
1932 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001933 set_true_value(obj);
1934
John Reck59135872010-11-02 12:39:01 -07001935 { MaybeObject* maybe_obj = CreateOddball("false", Smi::FromInt(0));
1936 if (!maybe_obj->ToObject(&obj)) return false;
1937 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001938 set_false_value(obj);
1939
John Reck59135872010-11-02 12:39:01 -07001940 { MaybeObject* maybe_obj = CreateOddball("hole", Smi::FromInt(-1));
1941 if (!maybe_obj->ToObject(&obj)) return false;
1942 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001943 set_the_hole_value(obj);
1944
John Reck59135872010-11-02 12:39:01 -07001945 { MaybeObject* maybe_obj =
1946 CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
1947 if (!maybe_obj->ToObject(&obj)) return false;
1948 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001949 set_no_interceptor_result_sentinel(obj);
1950
John Reck59135872010-11-02 12:39:01 -07001951 { MaybeObject* maybe_obj =
1952 CreateOddball("termination_exception", Smi::FromInt(-3));
1953 if (!maybe_obj->ToObject(&obj)) return false;
1954 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001955 set_termination_exception(obj);
1956
1957 // Allocate the empty string.
John Reck59135872010-11-02 12:39:01 -07001958 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
1959 if (!maybe_obj->ToObject(&obj)) return false;
1960 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001961 set_empty_string(String::cast(obj));
1962
1963 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
John Reck59135872010-11-02 12:39:01 -07001964 { MaybeObject* maybe_obj =
1965 LookupAsciiSymbol(constant_symbol_table[i].contents);
1966 if (!maybe_obj->ToObject(&obj)) return false;
1967 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001968 roots_[constant_symbol_table[i].index] = String::cast(obj);
1969 }
1970
1971 // Allocate the hidden symbol which is used to identify the hidden properties
1972 // in JSObjects. The hash code has a special value so that it will not match
1973 // the empty string when searching for the property. It cannot be part of the
1974 // loop above because it needs to be allocated manually with the special
1975 // hash code in place. The hash code for the hidden_symbol is zero to ensure
1976 // that it will always be at the first entry in property descriptors.
John Reck59135872010-11-02 12:39:01 -07001977 { MaybeObject* maybe_obj =
1978 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
1979 if (!maybe_obj->ToObject(&obj)) return false;
1980 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001981 hidden_symbol_ = String::cast(obj);
1982
1983 // Allocate the proxy for __proto__.
John Reck59135872010-11-02 12:39:01 -07001984 { MaybeObject* maybe_obj =
1985 AllocateProxy((Address) &Accessors::ObjectPrototype);
1986 if (!maybe_obj->ToObject(&obj)) return false;
1987 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001988 set_prototype_accessors(Proxy::cast(obj));
1989
1990 // Allocate the code_stubs dictionary. The initial size is set to avoid
1991 // expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07001992 { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
1993 if (!maybe_obj->ToObject(&obj)) return false;
1994 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001995 set_code_stubs(NumberDictionary::cast(obj));
1996
1997 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
1998 // is set to avoid expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07001999 { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
2000 if (!maybe_obj->ToObject(&obj)) return false;
2001 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002002 set_non_monomorphic_cache(NumberDictionary::cast(obj));
2003
Kristian Monsen25f61362010-05-21 11:50:48 +01002004 set_instanceof_cache_function(Smi::FromInt(0));
2005 set_instanceof_cache_map(Smi::FromInt(0));
2006 set_instanceof_cache_answer(Smi::FromInt(0));
2007
Steve Blocka7e24c12009-10-30 11:49:00 +00002008 CreateFixedStubs();
2009
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002010 // Allocate the dictionary of intrinsic function names.
John Reck59135872010-11-02 12:39:01 -07002011 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2012 if (!maybe_obj->ToObject(&obj)) return false;
2013 }
2014 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(obj);
2015 if (!maybe_obj->ToObject(&obj)) return false;
2016 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002017 set_intrinsic_function_names(StringDictionary::cast(obj));
2018
Leon Clarkee46be812010-01-19 14:06:41 +00002019 if (InitializeNumberStringCache()->IsFailure()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002020
Steve Block6ded16b2010-05-10 14:33:55 +01002021 // Allocate cache for single character ASCII strings.
John Reck59135872010-11-02 12:39:01 -07002022 { MaybeObject* maybe_obj =
2023 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2024 if (!maybe_obj->ToObject(&obj)) return false;
2025 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002026 set_single_character_string_cache(FixedArray::cast(obj));
2027
2028 // Allocate cache for external strings pointing to native source code.
John Reck59135872010-11-02 12:39:01 -07002029 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2030 if (!maybe_obj->ToObject(&obj)) return false;
2031 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002032 set_natives_source_cache(FixedArray::cast(obj));
2033
2034 // Handling of script id generation is in Factory::NewScript.
2035 set_last_script_id(undefined_value());
2036
2037 // Initialize keyed lookup cache.
2038 KeyedLookupCache::Clear();
2039
2040 // Initialize context slot cache.
2041 ContextSlotCache::Clear();
2042
2043 // Initialize descriptor cache.
2044 DescriptorLookupCache::Clear();
2045
2046 // Initialize compilation cache.
2047 CompilationCache::Clear();
2048
2049 return true;
2050}
2051
2052
John Reck59135872010-11-02 12:39:01 -07002053MaybeObject* Heap::InitializeNumberStringCache() {
Leon Clarkee46be812010-01-19 14:06:41 +00002054 // Compute the size of the number string cache based on the max heap size.
2055 // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
2056 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
2057 int number_string_cache_size = max_semispace_size_ / 512;
2058 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
John Reck59135872010-11-02 12:39:01 -07002059 Object* obj;
2060 MaybeObject* maybe_obj =
2061 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2062 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2063 return maybe_obj;
Leon Clarkee46be812010-01-19 14:06:41 +00002064}
2065
2066
2067void Heap::FlushNumberStringCache() {
2068 // Flush the number to string cache.
2069 int len = number_string_cache()->length();
2070 for (int i = 0; i < len; i++) {
2071 number_string_cache()->set_undefined(i);
2072 }
2073}
2074
2075
Steve Blocka7e24c12009-10-30 11:49:00 +00002076static inline int double_get_hash(double d) {
2077 DoubleRepresentation rep(d);
Leon Clarkee46be812010-01-19 14:06:41 +00002078 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
Steve Blocka7e24c12009-10-30 11:49:00 +00002079}
2080
2081
2082static inline int smi_get_hash(Smi* smi) {
Leon Clarkee46be812010-01-19 14:06:41 +00002083 return smi->value();
Steve Blocka7e24c12009-10-30 11:49:00 +00002084}
2085
2086
Steve Blocka7e24c12009-10-30 11:49:00 +00002087Object* Heap::GetNumberStringCache(Object* number) {
2088 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002089 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002090 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002091 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002092 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002093 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002094 }
2095 Object* key = number_string_cache()->get(hash * 2);
2096 if (key == number) {
2097 return String::cast(number_string_cache()->get(hash * 2 + 1));
2098 } else if (key->IsHeapNumber() &&
2099 number->IsHeapNumber() &&
2100 key->Number() == number->Number()) {
2101 return String::cast(number_string_cache()->get(hash * 2 + 1));
2102 }
2103 return undefined_value();
2104}
2105
2106
2107void Heap::SetNumberStringCache(Object* number, String* string) {
2108 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002109 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002110 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002111 hash = smi_get_hash(Smi::cast(number)) & mask;
Leon Clarke4515c472010-02-03 11:58:03 +00002112 number_string_cache()->set(hash * 2, Smi::cast(number));
Steve Blocka7e24c12009-10-30 11:49:00 +00002113 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002114 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002115 number_string_cache()->set(hash * 2, number);
2116 }
2117 number_string_cache()->set(hash * 2 + 1, string);
2118}
2119
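// Worked example (illustrative): with a number string cache of length 256
// the mask is (256 >> 1) - 1 = 127. The Smi 42 hashes to 42 & 127 = 42, so
// its key is stored at index 84 (hash * 2) and the cached string at index 85
// (hash * 2 + 1); GetNumberStringCache reads the same two slots back.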
2120
John Reck59135872010-11-02 12:39:01 -07002121MaybeObject* Heap::NumberToString(Object* number,
2122 bool check_number_string_cache) {
Andrei Popescu402d9372010-02-26 13:31:12 +00002123 Counters::number_to_string_runtime.Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002124 if (check_number_string_cache) {
2125 Object* cached = GetNumberStringCache(number);
2126 if (cached != undefined_value()) {
2127 return cached;
2128 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002129 }
2130
2131 char arr[100];
2132 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2133 const char* str;
2134 if (number->IsSmi()) {
2135 int num = Smi::cast(number)->value();
2136 str = IntToCString(num, buffer);
2137 } else {
2138 double num = HeapNumber::cast(number)->value();
2139 str = DoubleToCString(num, buffer);
2140 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002141
John Reck59135872010-11-02 12:39:01 -07002142 Object* js_string;
2143 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2144 if (maybe_js_string->ToObject(&js_string)) {
2145 SetNumberStringCache(number, String::cast(js_string));
Steve Blocka7e24c12009-10-30 11:49:00 +00002146 }
John Reck59135872010-11-02 12:39:01 -07002147 return maybe_js_string;
Steve Blocka7e24c12009-10-30 11:49:00 +00002148}
2149
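// Illustrative use (assumes the MaybeObject unwrapping pattern used in the
// rest of this file):
//
//   Object* str;
//   { MaybeObject* maybe_str = Heap::NumberToString(Smi::FromInt(7), true);
//     if (!maybe_str->ToObject(&str)) return maybe_str;
//   }
//   // Subsequent calls for the same number are served from the cache.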
2150
Steve Block3ce2e202009-11-05 08:53:23 +00002151Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2152 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2153}
2154
2155
2156Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2157 ExternalArrayType array_type) {
2158 switch (array_type) {
2159 case kExternalByteArray:
2160 return kExternalByteArrayMapRootIndex;
2161 case kExternalUnsignedByteArray:
2162 return kExternalUnsignedByteArrayMapRootIndex;
2163 case kExternalShortArray:
2164 return kExternalShortArrayMapRootIndex;
2165 case kExternalUnsignedShortArray:
2166 return kExternalUnsignedShortArrayMapRootIndex;
2167 case kExternalIntArray:
2168 return kExternalIntArrayMapRootIndex;
2169 case kExternalUnsignedIntArray:
2170 return kExternalUnsignedIntArrayMapRootIndex;
2171 case kExternalFloatArray:
2172 return kExternalFloatArrayMapRootIndex;
2173 default:
2174 UNREACHABLE();
2175 return kUndefinedValueRootIndex;
2176 }
2177}
2178
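// Example (illustrative): MapForExternalArrayType(kExternalIntArray) maps to
// kExternalIntArrayMapRootIndex, i.e. it returns the same map that
// CreateInitialMaps installed via set_external_int_array_map().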
2179
John Reck59135872010-11-02 12:39:01 -07002180MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01002181 // We need to distinguish the minus zero value and this cannot be
2182 // done after conversion to int. Doing this by comparing bit
2183 // patterns is faster than using fpclassify() et al.
2184 static const DoubleRepresentation minus_zero(-0.0);
2185
2186 DoubleRepresentation rep(value);
2187 if (rep.bits == minus_zero.bits) {
2188 return AllocateHeapNumber(-0.0, pretenure);
2189 }
2190
2191 int int_value = FastD2I(value);
2192 if (value == int_value && Smi::IsValid(int_value)) {
2193 return Smi::FromInt(int_value);
2194 }
2195
2196 // Materialize the value in the heap.
2197 return AllocateHeapNumber(value, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002198}
2199
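// Worked examples (illustrative): NumberFromDouble(3.0) returns
// Smi::FromInt(3) because 3.0 round-trips through FastD2I and fits in a Smi,
// whereas NumberFromDouble(-0.0) allocates a heap number so that the sign of
// zero is preserved.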
2200
John Reck59135872010-11-02 12:39:01 -07002201MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002202 // Statically ensure that it is safe to allocate proxies in paged spaces.
2203 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
2204 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002205 Object* result;
2206 { MaybeObject* maybe_result = Allocate(proxy_map(), space);
2207 if (!maybe_result->ToObject(&result)) return maybe_result;
2208 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002209
2210 Proxy::cast(result)->set_proxy(proxy);
2211 return result;
2212}
2213
2214
John Reck59135872010-11-02 12:39:01 -07002215MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2216 Object* result;
2217 { MaybeObject* maybe_result =
2218 Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2219 if (!maybe_result->ToObject(&result)) return maybe_result;
2220 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002221
2222 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
2223 share->set_name(name);
2224 Code* illegal = Builtins::builtin(Builtins::Illegal);
2225 share->set_code(illegal);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002226 share->set_scope_info(SerializedScopeInfo::Empty());
Steve Blocka7e24c12009-10-30 11:49:00 +00002227 Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
2228 share->set_construct_stub(construct_stub);
2229 share->set_expected_nof_properties(0);
2230 share->set_length(0);
2231 share->set_formal_parameter_count(0);
2232 share->set_instance_class_name(Object_symbol());
2233 share->set_function_data(undefined_value());
2234 share->set_script(undefined_value());
2235 share->set_start_position_and_type(0);
2236 share->set_debug_info(undefined_value());
2237 share->set_inferred_name(empty_string());
2238 share->set_compiler_hints(0);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002239 share->set_initial_map(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002240 share->set_this_property_assignments_count(0);
2241 share->set_this_property_assignments(undefined_value());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002242 share->set_num_literals(0);
2243 share->set_end_position(0);
2244 share->set_function_token_position(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002245 return result;
2246}
2247
2248
Steve Blockd0582a62009-12-15 09:54:21 +00002249// Returns true for a character in a range. Both limits are inclusive.
2250static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
  // This makes use of unsigned wraparound.
2252 return character - from <= to - from;
2253}
2254
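// Worked example (illustrative): Between('7', '0', '9') evaluates
// '7' - '0' = 7 <= '9' - '0' = 9 and is true. For a character below the
// range, e.g. '.', the unsigned subtraction wraps around to a very large
// value, so the single comparison also rejects it without an explicit
// lower-bound check.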
2255
John Reck59135872010-11-02 12:39:01 -07002256MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
2257 uint32_t c1,
2258 uint32_t c2) {
Steve Blockd0582a62009-12-15 09:54:21 +00002259 String* symbol;
2260 // Numeric strings have a different hash algorithm not known by
2261 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2262 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
2263 Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
2264 return symbol;
    // Now that we know the length is 2, we might as well make use of that
    // fact when building the new string.
2267 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2268 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
John Reck59135872010-11-02 12:39:01 -07002269 Object* result;
2270 { MaybeObject* maybe_result = Heap::AllocateRawAsciiString(2);
2271 if (!maybe_result->ToObject(&result)) return maybe_result;
2272 }
Steve Blockd0582a62009-12-15 09:54:21 +00002273 char* dest = SeqAsciiString::cast(result)->GetChars();
2274 dest[0] = c1;
2275 dest[1] = c2;
2276 return result;
2277 } else {
John Reck59135872010-11-02 12:39:01 -07002278 Object* result;
2279 { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(2);
2280 if (!maybe_result->ToObject(&result)) return maybe_result;
2281 }
Steve Blockd0582a62009-12-15 09:54:21 +00002282 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2283 dest[0] = c1;
2284 dest[1] = c2;
2285 return result;
2286 }
2287}
2288
2289
John Reck59135872010-11-02 12:39:01 -07002290MaybeObject* Heap::AllocateConsString(String* first, String* second) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002291 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002292 if (first_length == 0) {
2293 return second;
2294 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002295
2296 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002297 if (second_length == 0) {
2298 return first;
2299 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002300
2301 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002302
  // Optimization for two-character strings often used as keys in a
  // decompression dictionary. Check whether we already have the string in
  // the symbol table to prevent creation of many unnecessary strings.
2306 if (length == 2) {
2307 unsigned c1 = first->Get(0);
2308 unsigned c2 = second->Get(0);
2309 return MakeOrFindTwoCharacterString(c1, c2);
2310 }
2311
Steve Block6ded16b2010-05-10 14:33:55 +01002312 bool first_is_ascii = first->IsAsciiRepresentation();
2313 bool second_is_ascii = second->IsAsciiRepresentation();
2314 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002315
2316 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002317 // of the new cons string is too large.
2318 if (length > String::kMaxLength || length < 0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002319 Top::context()->mark_out_of_memory();
2320 return Failure::OutOfMemoryException();
2321 }
2322
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002323 bool is_ascii_data_in_two_byte_string = false;
2324 if (!is_ascii) {
2325 // At least one of the strings uses two-byte representation so we
2326 // can't use the fast case code for short ascii strings below, but
2327 // we can try to save memory if all chars actually fit in ascii.
2328 is_ascii_data_in_two_byte_string =
2329 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2330 if (is_ascii_data_in_two_byte_string) {
2331 Counters::string_add_runtime_ext_to_ascii.Increment();
2332 }
2333 }
2334
Steve Blocka7e24c12009-10-30 11:49:00 +00002335 // If the resulting string is small make a flat string.
2336 if (length < String::kMinNonFlatLength) {
2337 ASSERT(first->IsFlat());
2338 ASSERT(second->IsFlat());
2339 if (is_ascii) {
John Reck59135872010-11-02 12:39:01 -07002340 Object* result;
2341 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2342 if (!maybe_result->ToObject(&result)) return maybe_result;
2343 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002344 // Copy the characters into the new object.
2345 char* dest = SeqAsciiString::cast(result)->GetChars();
2346 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002347 const char* src;
2348 if (first->IsExternalString()) {
2349 src = ExternalAsciiString::cast(first)->resource()->data();
2350 } else {
2351 src = SeqAsciiString::cast(first)->GetChars();
2352 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002353 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2354 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002355 if (second->IsExternalString()) {
2356 src = ExternalAsciiString::cast(second)->resource()->data();
2357 } else {
2358 src = SeqAsciiString::cast(second)->GetChars();
2359 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002360 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2361 return result;
2362 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002363 if (is_ascii_data_in_two_byte_string) {
John Reck59135872010-11-02 12:39:01 -07002364 Object* result;
2365 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2366 if (!maybe_result->ToObject(&result)) return maybe_result;
2367 }
Steve Block6ded16b2010-05-10 14:33:55 +01002368 // Copy the characters into the new object.
2369 char* dest = SeqAsciiString::cast(result)->GetChars();
2370 String::WriteToFlat(first, dest, 0, first_length);
2371 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block6ded16b2010-05-10 14:33:55 +01002372 return result;
2373 }
2374
John Reck59135872010-11-02 12:39:01 -07002375 Object* result;
2376 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2377 if (!maybe_result->ToObject(&result)) return maybe_result;
2378 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002379 // Copy the characters into the new object.
2380 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2381 String::WriteToFlat(first, dest, 0, first_length);
2382 String::WriteToFlat(second, dest + first_length, 0, second_length);
2383 return result;
2384 }
2385 }
2386
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002387 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2388 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002389
John Reck59135872010-11-02 12:39:01 -07002390 Object* result;
2391 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2392 if (!maybe_result->ToObject(&result)) return maybe_result;
2393 }
Leon Clarke4515c472010-02-03 11:58:03 +00002394
2395 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002396 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002397 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002398 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002399 cons_string->set_hash_field(String::kEmptyHashField);
2400 cons_string->set_first(first, mode);
2401 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002402 return result;
2403}
2404
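// Example (illustrative): concatenating "ab" and "cd" yields length 4, which
// is below String::kMinNonFlatLength, so a flat SeqAsciiString "abcd" is
// allocated and filled directly; only longer results get a ConsString whose
// first and second fields point at the original strings.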
2405
John Reck59135872010-11-02 12:39:01 -07002406MaybeObject* Heap::AllocateSubString(String* buffer,
Steve Blocka7e24c12009-10-30 11:49:00 +00002407 int start,
Steve Block6ded16b2010-05-10 14:33:55 +01002408 int end,
2409 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002410 int length = end - start;
2411
2412 if (length == 1) {
2413 return Heap::LookupSingleCharacterStringFromCode(
2414 buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002415 } else if (length == 2) {
    // Optimization for two-character strings often used as keys in a
    // decompression dictionary. Check whether we already have the string in
    // the symbol table to prevent creation of many unnecessary strings.
2419 unsigned c1 = buffer->Get(start);
2420 unsigned c2 = buffer->Get(start + 1);
2421 return MakeOrFindTwoCharacterString(c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002422 }
2423
2424 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002425 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002426
John Reck59135872010-11-02 12:39:01 -07002427 Object* result;
2428 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
                   ? AllocateRawAsciiString(length, pretenure)
                   : AllocateRawTwoByteString(length, pretenure);
2431 if (!maybe_result->ToObject(&result)) return maybe_result;
2432 }
Steve Blockd0582a62009-12-15 09:54:21 +00002433 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002434 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002435 if (buffer->IsAsciiRepresentation()) {
2436 ASSERT(string_result->IsAsciiRepresentation());
2437 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2438 String::WriteToFlat(buffer, dest, start, end);
2439 } else {
2440 ASSERT(string_result->IsTwoByteRepresentation());
2441 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2442 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002443 }
Steve Blockd0582a62009-12-15 09:54:21 +00002444
Steve Blocka7e24c12009-10-30 11:49:00 +00002445 return result;
2446}
2447
2448
John Reck59135872010-11-02 12:39:01 -07002449MaybeObject* Heap::AllocateExternalStringFromAscii(
Steve Blocka7e24c12009-10-30 11:49:00 +00002450 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002451 size_t length = resource->length();
2452 if (length > static_cast<size_t>(String::kMaxLength)) {
2453 Top::context()->mark_out_of_memory();
2454 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002455 }
2456
Steve Blockd0582a62009-12-15 09:54:21 +00002457 Map* map = external_ascii_string_map();
John Reck59135872010-11-02 12:39:01 -07002458 Object* result;
2459 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2460 if (!maybe_result->ToObject(&result)) return maybe_result;
2461 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002462
2463 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002464 external_string->set_length(static_cast<int>(length));
2465 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002466 external_string->set_resource(resource);
2467
2468 return result;
2469}
2470
2471
John Reck59135872010-11-02 12:39:01 -07002472MaybeObject* Heap::AllocateExternalStringFromTwoByte(
Steve Blocka7e24c12009-10-30 11:49:00 +00002473 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002474 size_t length = resource->length();
2475 if (length > static_cast<size_t>(String::kMaxLength)) {
2476 Top::context()->mark_out_of_memory();
2477 return Failure::OutOfMemoryException();
2478 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002479
  // For small strings we check whether the resource contains only
  // ascii characters. If so, we use a different string map.
2482 bool is_ascii = true;
2483 if (length >= static_cast<size_t>(String::kMinNonFlatLength)) {
2484 is_ascii = false;
2485 } else {
2486 const uc16* data = resource->data();
2487 for (size_t i = 0; i < length; i++) {
2488 if (data[i] > String::kMaxAsciiCharCode) {
2489 is_ascii = false;
2490 break;
2491 }
2492 }
2493 }
2494
2495 Map* map = is_ascii ?
2496 Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
John Reck59135872010-11-02 12:39:01 -07002497 Object* result;
2498 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2499 if (!maybe_result->ToObject(&result)) return maybe_result;
2500 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002501
2502 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002503 external_string->set_length(static_cast<int>(length));
2504 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002505 external_string->set_resource(resource);
2506
2507 return result;
2508}
2509
2510
John Reck59135872010-11-02 12:39:01 -07002511MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002512 if (code <= String::kMaxAsciiCharCode) {
2513 Object* value = Heap::single_character_string_cache()->get(code);
2514 if (value != Heap::undefined_value()) return value;
2515
2516 char buffer[1];
2517 buffer[0] = static_cast<char>(code);
John Reck59135872010-11-02 12:39:01 -07002518 Object* result;
2519 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00002520
John Reck59135872010-11-02 12:39:01 -07002521 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002522 Heap::single_character_string_cache()->set(code, result);
2523 return result;
2524 }
2525
John Reck59135872010-11-02 12:39:01 -07002526 Object* result;
2527 { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(1);
2528 if (!maybe_result->ToObject(&result)) return maybe_result;
2529 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002530 String* answer = String::cast(result);
2531 answer->Set(0, code);
2532 return answer;
2533}
2534
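// Example (illustrative): LookupSingleCharacterStringFromCode('a') returns
// the symbol "a" and caches it in single_character_string_cache(), so later
// lookups for 'a' are a single array load. A code above
// String::kMaxAsciiCharCode, e.g. 0x20AC, allocates a fresh two-byte string
// on every call.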
2535
John Reck59135872010-11-02 12:39:01 -07002536MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002537 if (length < 0 || length > ByteArray::kMaxLength) {
2538 return Failure::OutOfMemoryException();
2539 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002540 if (pretenure == NOT_TENURED) {
2541 return AllocateByteArray(length);
2542 }
2543 int size = ByteArray::SizeFor(length);
John Reck59135872010-11-02 12:39:01 -07002544 Object* result;
2545 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2546 ? old_data_space_->AllocateRaw(size)
2547 : lo_space_->AllocateRaw(size);
2548 if (!maybe_result->ToObject(&result)) return maybe_result;
2549 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002550
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002551 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2552 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002553 return result;
2554}
2555
2556
John Reck59135872010-11-02 12:39:01 -07002557MaybeObject* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002558 if (length < 0 || length > ByteArray::kMaxLength) {
2559 return Failure::OutOfMemoryException();
2560 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002561 int size = ByteArray::SizeFor(length);
2562 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002563 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002564 Object* result;
2565 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2566 if (!maybe_result->ToObject(&result)) return maybe_result;
2567 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002568
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002569 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2570 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002571 return result;
2572}
2573
2574
2575void Heap::CreateFillerObjectAt(Address addr, int size) {
2576 if (size == 0) return;
2577 HeapObject* filler = HeapObject::FromAddress(addr);
2578 if (size == kPointerSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01002579 filler->set_map(one_pointer_filler_map());
2580 } else if (size == 2 * kPointerSize) {
2581 filler->set_map(two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002582 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002583 filler->set_map(byte_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002584 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2585 }
2586}
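// Illustrative usage sketch (hedged: the variables are hypothetical, not
// from a real caller): when an allocation is shrunk, the unused tail can be
// turned back into a walkable heap object, e.g.
//
//   // Release the last two words of a previously allocated region.
//   Heap::CreateFillerObjectAt(object->address() + new_size,
//                              2 * kPointerSize);
//
// which, per the branches above, installs the two-pointer filler map.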
2587
2588
John Reck59135872010-11-02 12:39:01 -07002589MaybeObject* Heap::AllocatePixelArray(int length,
Steve Blocka7e24c12009-10-30 11:49:00 +00002590 uint8_t* external_pointer,
2591 PretenureFlag pretenure) {
2592 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002593 Object* result;
2594 { MaybeObject* maybe_result =
2595 AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
2596 if (!maybe_result->ToObject(&result)) return maybe_result;
2597 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002598
2599 reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
2600 reinterpret_cast<PixelArray*>(result)->set_length(length);
2601 reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);
2602
2603 return result;
2604}
2605
2606
John Reck59135872010-11-02 12:39:01 -07002607MaybeObject* Heap::AllocateExternalArray(int length,
2608 ExternalArrayType array_type,
2609 void* external_pointer,
2610 PretenureFlag pretenure) {
Steve Block3ce2e202009-11-05 08:53:23 +00002611 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002612 Object* result;
2613 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
2614 space,
2615 OLD_DATA_SPACE);
2616 if (!maybe_result->ToObject(&result)) return maybe_result;
2617 }
Steve Block3ce2e202009-11-05 08:53:23 +00002618
2619 reinterpret_cast<ExternalArray*>(result)->set_map(
2620 MapForExternalArrayType(array_type));
2621 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2622 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2623 external_pointer);
2624
2625 return result;
2626}
2627
2628
John Reck59135872010-11-02 12:39:01 -07002629MaybeObject* Heap::CreateCode(const CodeDesc& desc,
2630 Code::Flags flags,
2631 Handle<Object> self_reference) {
Leon Clarkeac952652010-07-15 11:15:24 +01002632 // Allocate ByteArray before the Code object, so that we do not risk
2633 // leaving uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002634 Object* reloc_info;
2635 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2636 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
2637 }
Leon Clarkeac952652010-07-15 11:15:24 +01002638
Steve Blocka7e24c12009-10-30 11:49:00 +00002639 // Compute size
Leon Clarkeac952652010-07-15 11:15:24 +01002640 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002641 int obj_size = Code::SizeFor(body_size);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002642 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
John Reck59135872010-11-02 12:39:01 -07002643 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002644 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002645 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002646 } else {
John Reck59135872010-11-02 12:39:01 -07002647 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002648 }
2649
John Reck59135872010-11-02 12:39:01 -07002650 Object* result;
2651 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002652
2653 // Initialize the object
2654 HeapObject::cast(result)->set_map(code_map());
2655 Code* code = Code::cast(result);
2656 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2657 code->set_instruction_size(desc.instr_size);
Leon Clarkeac952652010-07-15 11:15:24 +01002658 code->set_relocation_info(ByteArray::cast(reloc_info));
Steve Blocka7e24c12009-10-30 11:49:00 +00002659 code->set_flags(flags);
2660 // Allow self references to the created code object by patching the handle to
2661 // point to the newly allocated Code object.
2662 if (!self_reference.is_null()) {
2663 *(self_reference.location()) = code;
2664 }
2665 // Migrate generated code.
2666 // The generated code can contain Object** values (typically from handles)
2667 // that are dereferenced during the copy to point directly to the actual heap
2668 // objects. These pointers can include references to the code object itself,
2669 // through the self_reference parameter.
2670 code->CopyFrom(desc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002671
2672#ifdef DEBUG
2673 code->Verify();
2674#endif
2675 return code;
2676}
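// Minimal sketch of the self-reference mechanism described above (hedged:
// the call shape is illustrative, not a real call site; desc, flags and
// self_reference stand for the caller's own values):
//
//   MaybeObject* maybe_code = Heap::CreateCode(desc, flags, self_reference);
//   // On success, *self_reference.location() already points at the new
//   // Code object, so relocation entries recorded against that handle end
//   // up referring to the freshly copied instructions.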
2677
2678
John Reck59135872010-11-02 12:39:01 -07002679MaybeObject* Heap::CopyCode(Code* code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002680 // Allocate an object the same size as the code object.
2681 int obj_size = code->Size();
John Reck59135872010-11-02 12:39:01 -07002682 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002683 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002684 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002685 } else {
John Reck59135872010-11-02 12:39:01 -07002686 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002687 }
2688
John Reck59135872010-11-02 12:39:01 -07002689 Object* result;
2690 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002691
2692 // Copy code object.
2693 Address old_addr = code->address();
2694 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002695 CopyBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002696 // Relocate the copy.
2697 Code* new_code = Code::cast(result);
2698 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2699 new_code->Relocate(new_addr - old_addr);
2700 return new_code;
2701}
2702
2703
John Reck59135872010-11-02 12:39:01 -07002704MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Leon Clarkeac952652010-07-15 11:15:24 +01002705 // Allocate ByteArray before the Code object, so that we do not risk
2706 // leaving uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002707 Object* reloc_info_array;
2708 { MaybeObject* maybe_reloc_info_array =
2709 AllocateByteArray(reloc_info.length(), TENURED);
2710 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
2711 return maybe_reloc_info_array;
2712 }
2713 }
Leon Clarkeac952652010-07-15 11:15:24 +01002714
2715 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
Steve Block6ded16b2010-05-10 14:33:55 +01002716
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002717 int new_obj_size = Code::SizeFor(new_body_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002718
2719 Address old_addr = code->address();
2720
2721 size_t relocation_offset =
Leon Clarkeac952652010-07-15 11:15:24 +01002722 static_cast<size_t>(code->instruction_end() - old_addr);
Steve Block6ded16b2010-05-10 14:33:55 +01002723
John Reck59135872010-11-02 12:39:01 -07002724 MaybeObject* maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002725 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002726 maybe_result = lo_space_->AllocateRawCode(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002727 } else {
John Reck59135872010-11-02 12:39:01 -07002728 maybe_result = code_space_->AllocateRaw(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002729 }
2730
John Reck59135872010-11-02 12:39:01 -07002731 Object* result;
2732 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002733
2734 // Copy code object.
2735 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2736
2737 // Copy header and instructions.
2738 memcpy(new_addr, old_addr, relocation_offset);
2739
Steve Block6ded16b2010-05-10 14:33:55 +01002740 Code* new_code = Code::cast(result);
Leon Clarkeac952652010-07-15 11:15:24 +01002741 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
Steve Block6ded16b2010-05-10 14:33:55 +01002742
Leon Clarkeac952652010-07-15 11:15:24 +01002743 // Copy patched rinfo.
2744 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
Steve Block6ded16b2010-05-10 14:33:55 +01002745
2746 // Relocate the copy.
2747 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2748 new_code->Relocate(new_addr - old_addr);
2749
2750#ifdef DEBUG
2751 new_code->Verify();
2752#endif
2753 return new_code;
2754}
2755
2756
John Reck59135872010-11-02 12:39:01 -07002757MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002758 ASSERT(gc_state_ == NOT_IN_GC);
2759 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00002760 // If allocation failures are disallowed, we may allocate in a different
2761 // space when new space is full and the object is not a large object.
2762 AllocationSpace retry_space =
2763 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
John Reck59135872010-11-02 12:39:01 -07002764 Object* result;
2765 { MaybeObject* maybe_result =
2766 AllocateRaw(map->instance_size(), space, retry_space);
2767 if (!maybe_result->ToObject(&result)) return maybe_result;
2768 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002769 HeapObject::cast(result)->set_map(map);
Steve Block3ce2e202009-11-05 08:53:23 +00002770#ifdef ENABLE_LOGGING_AND_PROFILING
2771 ProducerHeapProfile::RecordJSObjectAllocation(result);
2772#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002773 return result;
2774}
2775
2776
John Reck59135872010-11-02 12:39:01 -07002777MaybeObject* Heap::InitializeFunction(JSFunction* function,
2778 SharedFunctionInfo* shared,
2779 Object* prototype) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002780 ASSERT(!prototype->IsMap());
2781 function->initialize_properties();
2782 function->initialize_elements();
2783 function->set_shared(shared);
Iain Merrick75681382010-08-19 15:07:18 +01002784 function->set_code(shared->code());
Steve Blocka7e24c12009-10-30 11:49:00 +00002785 function->set_prototype_or_initial_map(prototype);
2786 function->set_context(undefined_value());
Leon Clarke4515c472010-02-03 11:58:03 +00002787 function->set_literals(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00002788 return function;
2789}
2790
2791
John Reck59135872010-11-02 12:39:01 -07002792MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002793 // Allocate the prototype. Make sure to use the object function
2794 // from the function's context, since the function can be from a
2795 // different context.
2796 JSFunction* object_function =
2797 function->context()->global_context()->object_function();
John Reck59135872010-11-02 12:39:01 -07002798 Object* prototype;
2799 { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
2800 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2801 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002802 // When creating the prototype for the function we must set its
2803 // constructor to the function.
John Reck59135872010-11-02 12:39:01 -07002804 Object* result;
2805 { MaybeObject* maybe_result =
2806 JSObject::cast(prototype)->SetProperty(constructor_symbol(),
2807 function,
2808 DONT_ENUM);
2809 if (!maybe_result->ToObject(&result)) return maybe_result;
2810 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002811 return prototype;
2812}
2813
2814
John Reck59135872010-11-02 12:39:01 -07002815MaybeObject* Heap::AllocateFunction(Map* function_map,
2816 SharedFunctionInfo* shared,
2817 Object* prototype,
2818 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002819 AllocationSpace space =
2820 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002821 Object* result;
2822 { MaybeObject* maybe_result = Allocate(function_map, space);
2823 if (!maybe_result->ToObject(&result)) return maybe_result;
2824 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002825 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2826}
2827
2828
John Reck59135872010-11-02 12:39:01 -07002829MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002830 // To get fast allocation and map sharing for arguments objects we
2831 // allocate them based on an arguments boilerplate.
2832
2833 // This calls Copy directly rather than using Heap::AllocateRaw so we
2834 // duplicate the check here.
2835 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2836
2837 JSObject* boilerplate =
2838 Top::context()->global_context()->arguments_boilerplate();
2839
Leon Clarkee46be812010-01-19 14:06:41 +00002840 // Check that the size of the boilerplate matches our
2841 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2842 // on the size being a known constant.
2843 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
2844
2845 // Do the allocation.
John Reck59135872010-11-02 12:39:01 -07002846 Object* result;
2847 { MaybeObject* maybe_result =
2848 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
2849 if (!maybe_result->ToObject(&result)) return maybe_result;
2850 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002851
2852 // Copy the content. The arguments boilerplate doesn't have any
2853 // fields that point to new space so it's safe to skip the write
2854 // barrier here.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002855 CopyBlock(HeapObject::cast(result)->address(),
2856 boilerplate->address(),
Leon Clarkee46be812010-01-19 14:06:41 +00002857 kArgumentsObjectSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002858
2859 // Set the two properties.
2860 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
2861 callee);
2862 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
2863 Smi::FromInt(length),
2864 SKIP_WRITE_BARRIER);
2865
2866 // Check the state of the object
2867 ASSERT(JSObject::cast(result)->HasFastProperties());
2868 ASSERT(JSObject::cast(result)->HasFastElements());
2869
2870 return result;
2871}
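// Worked example for the boilerplate copy above (illustrative only): for a
// call site invoking a function f with two arguments, the clone starts as a
// byte-for-byte copy of arguments_boilerplate(), after which only the two
// in-object slots are rewritten: the callee slot receives f and the length
// slot receives Smi::FromInt(2); because a smi is never a new-space pointer,
// the write barrier can safely be skipped for that second store.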
2872
2873
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002874static bool HasDuplicates(DescriptorArray* descriptors) {
2875 int count = descriptors->number_of_descriptors();
2876 if (count > 1) {
2877 String* prev_key = descriptors->GetKey(0);
2878 for (int i = 1; i != count; i++) {
2879 String* current_key = descriptors->GetKey(i);
2880 if (prev_key == current_key) return true;
2881 prev_key = current_key;
2882 }
2883 }
2884 return false;
2885}
2886
2887
John Reck59135872010-11-02 12:39:01 -07002888MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002889 ASSERT(!fun->has_initial_map());
2890
2891 // First create a new map with the size and number of in-object properties
2892 // suggested by the function.
2893 int instance_size = fun->shared()->CalculateInstanceSize();
2894 int in_object_properties = fun->shared()->CalculateInObjectProperties();
John Reck59135872010-11-02 12:39:01 -07002895 Object* map_obj;
2896 { MaybeObject* maybe_map_obj =
2897 Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
2898 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
2899 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002900
2901 // Fetch or allocate prototype.
2902 Object* prototype;
2903 if (fun->has_instance_prototype()) {
2904 prototype = fun->instance_prototype();
2905 } else {
John Reck59135872010-11-02 12:39:01 -07002906 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
2907 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2908 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002909 }
2910 Map* map = Map::cast(map_obj);
2911 map->set_inobject_properties(in_object_properties);
2912 map->set_unused_property_fields(in_object_properties);
2913 map->set_prototype(prototype);
Steve Block8defd9f2010-07-08 12:39:36 +01002914 ASSERT(map->has_fast_elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00002915
Andrei Popescu402d9372010-02-26 13:31:12 +00002916 // If the function has only simple this property assignments add
2917 // field descriptors for these to the initial map as the object
2918 // cannot be constructed without having these properties. Guard by
2919 // the inline_new flag so we only change the map if we generate a
2920 // specialized construct stub.
Steve Blocka7e24c12009-10-30 11:49:00 +00002921 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Andrei Popescu402d9372010-02-26 13:31:12 +00002922 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002923 int count = fun->shared()->this_property_assignments_count();
2924 if (count > in_object_properties) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002925 // Inline constructor can only handle inobject properties.
2926 fun->shared()->ForbidInlineConstructor();
2927 } else {
John Reck59135872010-11-02 12:39:01 -07002928 Object* descriptors_obj;
2929 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
2930 if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
2931 return maybe_descriptors_obj;
2932 }
2933 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002934 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
2935 for (int i = 0; i < count; i++) {
2936 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
2937 ASSERT(name->IsSymbol());
2938 FieldDescriptor field(name, i, NONE);
2939 field.SetEnumerationIndex(i);
2940 descriptors->Set(i, &field);
2941 }
2942 descriptors->SetNextEnumerationIndex(count);
2943 descriptors->SortUnchecked();
2944
2945 // The descriptors may contain duplicates because the compiler does not
2946 // guarantee the uniqueness of property names (it would have required
2947 // quadratic time). Once the descriptors are sorted we can check for
2948 // duplicates in linear time.
2949 if (HasDuplicates(descriptors)) {
2950 fun->shared()->ForbidInlineConstructor();
2951 } else {
2952 map->set_instance_descriptors(descriptors);
2953 map->set_pre_allocated_property_fields(count);
2954 map->set_unused_property_fields(in_object_properties - count);
2955 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002956 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002957 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002958
2959 fun->shared()->StartInobjectSlackTracking(map);
2960
Steve Blocka7e24c12009-10-30 11:49:00 +00002961 return map;
2962}
2963
2964
2965void Heap::InitializeJSObjectFromMap(JSObject* obj,
2966 FixedArray* properties,
2967 Map* map) {
2968 obj->set_properties(properties);
2969 obj->initialize_elements();
2970 // TODO(1240798): Initialize the object's body using valid initial values
2971 // according to the object's initial map. For example, if the map's
2972 // instance type is JS_ARRAY_TYPE, the length field should be initialized
2973 // to a number (e.g., Smi::FromInt(0)) and the elements initialized to a
2974 // fixed array (e.g., Heap::empty_fixed_array()). Currently, the object
2975 // verification code has to cope with (temporarily) invalid objects. See,
2976 // for example, JSArray::JSArrayVerify.
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002977 Object* filler;
2978 // We cannot always fill with one_pointer_filler_map because objects
2979 // created from API functions expect their internal fields to be initialized
2980 // with undefined_value.
2981 if (map->constructor()->IsJSFunction() &&
2982 JSFunction::cast(map->constructor())->shared()->
2983 IsInobjectSlackTrackingInProgress()) {
2984 // We might want to shrink the object later.
2985 ASSERT(obj->GetInternalFieldCount() == 0);
2986 filler = Heap::one_pointer_filler_map();
2987 } else {
2988 filler = Heap::undefined_value();
2989 }
2990 obj->InitializeBody(map->instance_size(), filler);
Steve Blocka7e24c12009-10-30 11:49:00 +00002991}
2992
2993
John Reck59135872010-11-02 12:39:01 -07002994MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002995 // JSFunctions should be allocated using AllocateFunction to be
2996 // properly initialized.
2997 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
2998
Steve Block8defd9f2010-07-08 12:39:36 +01002999 // Both types of global objects should be allocated using
3000 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00003001 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3002 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3003
3004 // Allocate the backing storage for the properties.
3005 int prop_size =
3006 map->pre_allocated_property_fields() +
3007 map->unused_property_fields() -
3008 map->inobject_properties();
3009 ASSERT(prop_size >= 0);
John Reck59135872010-11-02 12:39:01 -07003010 Object* properties;
3011 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3012 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3013 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003014
3015 // Allocate the JSObject.
3016 AllocationSpace space =
3017 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3018 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
John Reck59135872010-11-02 12:39:01 -07003019 Object* obj;
3020 { MaybeObject* maybe_obj = Allocate(map, space);
3021 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3022 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003023
3024 // Initialize the JSObject.
3025 InitializeJSObjectFromMap(JSObject::cast(obj),
3026 FixedArray::cast(properties),
3027 map);
Steve Block8defd9f2010-07-08 12:39:36 +01003028 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003029 return obj;
3030}
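// Worked example for the property backing store size computed above (the
// numbers are illustrative only): a map with 4 pre-allocated property
// fields, 2 unused property fields and 3 in-object properties requests a
// FixedArray of 4 + 2 - 3 = 3 out-of-object slots; when every expected
// property fits in the object itself the expression yields 0 and the
// allocation degenerates to the shared empty_fixed_array().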
3031
3032
John Reck59135872010-11-02 12:39:01 -07003033MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3034 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003035 // Allocate the initial map if absent.
3036 if (!constructor->has_initial_map()) {
John Reck59135872010-11-02 12:39:01 -07003037 Object* initial_map;
3038 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3039 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3040 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003041 constructor->set_initial_map(Map::cast(initial_map));
3042 Map::cast(initial_map)->set_constructor(constructor);
3043 }
3044 // Allocate the object based on the constructor's initial map.
John Reck59135872010-11-02 12:39:01 -07003045 MaybeObject* result =
Steve Blocka7e24c12009-10-30 11:49:00 +00003046 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
John Reck59135872010-11-02 12:39:01 -07003047#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00003048 // Make sure result is NOT a global object if valid.
John Reck59135872010-11-02 12:39:01 -07003049 Object* non_failure;
3050 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3051#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003052 return result;
3053}
3054
3055
John Reck59135872010-11-02 12:39:01 -07003056MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003057 ASSERT(constructor->has_initial_map());
3058 Map* map = constructor->initial_map();
3059
3060 // Make sure no field properties are described in the initial map.
3061 // This guarantees us that normalizing the properties does not
3062 // require us to change property values to JSGlobalPropertyCells.
3063 ASSERT(map->NextFreePropertyIndex() == 0);
3064
3065 // Make sure we don't have a ton of pre-allocated slots in the
3066 // global objects. They will be unused once we normalize the object.
3067 ASSERT(map->unused_property_fields() == 0);
3068 ASSERT(map->inobject_properties() == 0);
3069
3070 // Initial size of the backing store to avoid resizing the storage during
3071 // bootstrapping. The size differs between the JS global object and the
3072 // builtins object.
3073 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3074
3075 // Allocate a dictionary object for backing storage.
John Reck59135872010-11-02 12:39:01 -07003076 Object* obj;
3077 { MaybeObject* maybe_obj =
3078 StringDictionary::Allocate(
3079 map->NumberOfDescribedProperties() * 2 + initial_size);
3080 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3081 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003082 StringDictionary* dictionary = StringDictionary::cast(obj);
3083
3084 // The global object might be created from an object template with accessors.
3085 // Fill these accessors into the dictionary.
3086 DescriptorArray* descs = map->instance_descriptors();
3087 for (int i = 0; i < descs->number_of_descriptors(); i++) {
3088 PropertyDetails details = descs->GetDetails(i);
3089 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3090 PropertyDetails d =
3091 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3092 Object* value = descs->GetCallbacksObject(i);
John Reck59135872010-11-02 12:39:01 -07003093 { MaybeObject* maybe_value = Heap::AllocateJSGlobalPropertyCell(value);
3094 if (!maybe_value->ToObject(&value)) return maybe_value;
3095 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003096
John Reck59135872010-11-02 12:39:01 -07003097 Object* result;
3098 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3099 if (!maybe_result->ToObject(&result)) return maybe_result;
3100 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003101 dictionary = StringDictionary::cast(result);
3102 }
3103
3104 // Allocate the global object and initialize it with the backing store.
John Reck59135872010-11-02 12:39:01 -07003105 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3106 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3107 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003108 JSObject* global = JSObject::cast(obj);
3109 InitializeJSObjectFromMap(global, dictionary, map);
3110
3111 // Create a new map for the global object.
John Reck59135872010-11-02 12:39:01 -07003112 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3113 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3114 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003115 Map* new_map = Map::cast(obj);
3116
3117 // Set up the global object as a normalized object.
3118 global->set_map(new_map);
3119 global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
3120 global->set_properties(dictionary);
3121
3122 // Make sure result is a global object with properties in dictionary.
3123 ASSERT(global->IsGlobalObject());
3124 ASSERT(!global->HasFastProperties());
3125 return global;
3126}
3127
3128
John Reck59135872010-11-02 12:39:01 -07003129MaybeObject* Heap::CopyJSObject(JSObject* source) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003130 // Never used to copy functions. If functions need to be copied we
3131 // have to be careful to clear the literals array.
3132 ASSERT(!source->IsJSFunction());
3133
3134 // Make the clone.
3135 Map* map = source->map();
3136 int object_size = map->instance_size();
3137 Object* clone;
3138
3139 // If we're forced to always allocate, we use the general allocation
3140 // functions which may leave us with an object in old space.
3141 if (always_allocate()) {
John Reck59135872010-11-02 12:39:01 -07003142 { MaybeObject* maybe_clone =
3143 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3144 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3145 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003146 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003147 CopyBlock(clone_address,
3148 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003149 object_size);
3150 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01003151 RecordWrites(clone_address,
3152 JSObject::kHeaderSize,
3153 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003154 } else {
John Reck59135872010-11-02 12:39:01 -07003155 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3156 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3157 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003158 ASSERT(Heap::InNewSpace(clone));
3159 // Since we know the clone is allocated in new space, we can copy
3160 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003161 CopyBlock(HeapObject::cast(clone)->address(),
3162 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003163 object_size);
3164 }
3165
3166 FixedArray* elements = FixedArray::cast(source->elements());
3167 FixedArray* properties = FixedArray::cast(source->properties());
3168 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01003169 if (elements->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003170 Object* elem;
3171 { MaybeObject* maybe_elem =
3172 (elements->map() == fixed_cow_array_map()) ?
3173 elements : CopyFixedArray(elements);
3174 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3175 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003176 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
3177 }
3178 // Update properties if necessary.
3179 if (properties->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003180 Object* prop;
3181 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3182 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3183 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003184 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3185 }
3186 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00003187#ifdef ENABLE_LOGGING_AND_PROFILING
3188 ProducerHeapProfile::RecordJSObjectAllocation(clone);
3189#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003190 return clone;
3191}
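// Illustrative note for the two copy paths above (a simplified reading, not
// new behaviour): a clone placed in old space may end up holding old-to-new
// pointers in every field copied after the header, which is why RecordWrites
// re-registers that whole range; a clone kept in new space cannot create
// such references, so the copy needs no write barrier work at all.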
3192
3193
John Reck59135872010-11-02 12:39:01 -07003194MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3195 JSGlobalProxy* object) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003196 ASSERT(constructor->has_initial_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003197 Map* map = constructor->initial_map();
3198
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003199 // Check that the already allocated object has the same size and type as
Steve Blocka7e24c12009-10-30 11:49:00 +00003200 // objects allocated using the constructor.
3201 ASSERT(map->instance_size() == object->map()->instance_size());
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003202 ASSERT(map->instance_type() == object->map()->instance_type());
Steve Blocka7e24c12009-10-30 11:49:00 +00003203
3204 // Allocate the backing storage for the properties.
3205 int prop_size = map->unused_property_fields() - map->inobject_properties();
John Reck59135872010-11-02 12:39:01 -07003206 Object* properties;
3207 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3208 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3209 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003210
3211 // Reset the map for the object.
3212 object->set_map(constructor->initial_map());
3213
3214 // Reinitialize the object from the constructor map.
3215 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3216 return object;
3217}
3218
3219
John Reck59135872010-11-02 12:39:01 -07003220MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
3221 PretenureFlag pretenure) {
3222 Object* result;
3223 { MaybeObject* maybe_result =
3224 AllocateRawAsciiString(string.length(), pretenure);
3225 if (!maybe_result->ToObject(&result)) return maybe_result;
3226 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003227
3228 // Copy the characters into the new object.
3229 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3230 for (int i = 0; i < string.length(); i++) {
3231 string_result->SeqAsciiStringSet(i, string[i]);
3232 }
3233 return result;
3234}
3235
3236
John Reck59135872010-11-02 12:39:01 -07003237MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> string,
3238 PretenureFlag pretenure) {
Leon Clarkeac952652010-07-15 11:15:24 +01003239 // V8 only supports characters in the Basic Multilingual Plane.
3240 const uc32 kMaxSupportedChar = 0xFFFF;
Steve Blocka7e24c12009-10-30 11:49:00 +00003241 // Count the number of characters in the UTF-8 string and check if
3242 // it is an ASCII string.
3243 Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
3244 decoder->Reset(string.start(), string.length());
3245 int chars = 0;
3246 bool is_ascii = true;
3247 while (decoder->has_more()) {
3248 uc32 r = decoder->GetNext();
3249 if (r > String::kMaxAsciiCharCode) is_ascii = false;
3250 chars++;
3251 }
3252
3253 // If the string is ASCII, we do not need to convert the characters
3254 // since UTF-8 is backward compatible with ASCII.
3255 if (is_ascii) return AllocateStringFromAscii(string, pretenure);
3256
John Reck59135872010-11-02 12:39:01 -07003257 Object* result;
3258 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3259 if (!maybe_result->ToObject(&result)) return maybe_result;
3260 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003261
3262 // Convert and copy the characters into the new object.
3263 String* string_result = String::cast(result);
3264 decoder->Reset(string.start(), string.length());
3265 for (int i = 0; i < chars; i++) {
3266 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003267 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003268 string_result->Set(i, r);
3269 }
3270 return result;
3271}
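// Illustrative examples for the paths above (assuming standard UTF-8 byte
// sequences): "abc" decodes to three characters, all of them at or below
// String::kMaxAsciiCharCode, so it is routed to AllocateStringFromAscii
// unchanged; "caf\xc3\xa9" decodes to four characters, one of them above the
// ASCII range, so a two-byte string of length 4 is allocated and filled;
// any character outside the Basic Multilingual Plane is replaced with
// unibrow::Utf8::kBadChar per the kMaxSupportedChar check.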
3272
3273
John Reck59135872010-11-02 12:39:01 -07003274MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3275 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003276 // Check if the string is an ASCII string.
3277 int i = 0;
3278 while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;
3279
John Reck59135872010-11-02 12:39:01 -07003280 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003281 if (i == string.length()) { // It's an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003282 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003283 } else { // It's not an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003284 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003285 }
John Reck59135872010-11-02 12:39:01 -07003286 Object* result;
3287 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003288
3289 // Copy the characters into the new object, which may be either ASCII or
3290 // UTF-16.
3291 String* string_result = String::cast(result);
3292 for (int i = 0; i < string.length(); i++) {
3293 string_result->Set(i, string[i]);
3294 }
3295 return result;
3296}
3297
3298
3299Map* Heap::SymbolMapForString(String* string) {
3300 // If the string is in new space it cannot be used as a symbol.
3301 if (InNewSpace(string)) return NULL;
3302
3303 // Find the corresponding symbol map for strings.
3304 Map* map = string->map();
Steve Blockd0582a62009-12-15 09:54:21 +00003305 if (map == ascii_string_map()) return ascii_symbol_map();
3306 if (map == string_map()) return symbol_map();
3307 if (map == cons_string_map()) return cons_symbol_map();
3308 if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
3309 if (map == external_string_map()) return external_symbol_map();
3310 if (map == external_ascii_string_map()) return external_ascii_symbol_map();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01003311 if (map == external_string_with_ascii_data_map()) {
3312 return external_symbol_with_ascii_data_map();
3313 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003314
3315 // No match found.
3316 return NULL;
3317}
3318
3319
John Reck59135872010-11-02 12:39:01 -07003320MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3321 int chars,
3322 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00003323 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003324 // Ensure the chars matches the number of characters in the buffer.
3325 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3326 // Determine whether the string is ascii.
3327 bool is_ascii = true;
Leon Clarkee46be812010-01-19 14:06:41 +00003328 while (buffer->has_more()) {
3329 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3330 is_ascii = false;
3331 break;
3332 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003333 }
3334 buffer->Rewind();
3335
3336 // Compute map and object size.
3337 int size;
3338 Map* map;
3339
3340 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00003341 if (chars > SeqAsciiString::kMaxLength) {
3342 return Failure::OutOfMemoryException();
3343 }
Steve Blockd0582a62009-12-15 09:54:21 +00003344 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003345 size = SeqAsciiString::SizeFor(chars);
3346 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00003347 if (chars > SeqTwoByteString::kMaxLength) {
3348 return Failure::OutOfMemoryException();
3349 }
Steve Blockd0582a62009-12-15 09:54:21 +00003350 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003351 size = SeqTwoByteString::SizeFor(chars);
3352 }
3353
3354 // Allocate string.
John Reck59135872010-11-02 12:39:01 -07003355 Object* result;
3356 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3357 ? lo_space_->AllocateRaw(size)
3358 : old_data_space_->AllocateRaw(size);
3359 if (!maybe_result->ToObject(&result)) return maybe_result;
3360 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003361
3362 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00003363 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00003364 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00003365 answer->set_length(chars);
3366 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00003367
3368 ASSERT_EQ(size, answer->Size());
3369
3370 // Fill in the characters.
3371 for (int i = 0; i < chars; i++) {
3372 answer->Set(i, buffer->GetNext());
3373 }
3374 return answer;
3375}
3376
3377
John Reck59135872010-11-02 12:39:01 -07003378MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003379 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3380 return Failure::OutOfMemoryException();
3381 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003382
3383 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003384 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003385
Leon Clarkee46be812010-01-19 14:06:41 +00003386 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3387 AllocationSpace retry_space = OLD_DATA_SPACE;
3388
Steve Blocka7e24c12009-10-30 11:49:00 +00003389 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00003390 if (size > kMaxObjectSizeInNewSpace) {
3391 // Allocate in large object space, retry space will be ignored.
3392 space = LO_SPACE;
3393 } else if (size > MaxObjectSizeInPagedSpace()) {
3394 // Allocate in new space, retry in large object space.
3395 retry_space = LO_SPACE;
3396 }
3397 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3398 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003399 }
John Reck59135872010-11-02 12:39:01 -07003400 Object* result;
3401 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3402 if (!maybe_result->ToObject(&result)) return maybe_result;
3403 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003404
Steve Blocka7e24c12009-10-30 11:49:00 +00003405 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003406 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003407 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003408 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003409 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3410 return result;
3411}
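// Worked example of the space selection above (sizes are illustrative and
// depend on the build configuration): a short, non-tenured string stays in
// new space with OLD_DATA_SPACE as the retry space; a string whose SizeFor()
// exceeds MaxObjectSizeInPagedSpace() but still fits in new space is
// allocated there with LO_SPACE as the retry space; and anything larger than
// kMaxObjectSizeInNewSpace goes straight to the large object space.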
3412
3413
John Reck59135872010-11-02 12:39:01 -07003414MaybeObject* Heap::AllocateRawTwoByteString(int length,
3415 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003416 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3417 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00003418 }
Leon Clarkee46be812010-01-19 14:06:41 +00003419 int size = SeqTwoByteString::SizeFor(length);
3420 ASSERT(size <= SeqTwoByteString::kMaxSize);
3421 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3422 AllocationSpace retry_space = OLD_DATA_SPACE;
3423
3424 if (space == NEW_SPACE) {
3425 if (size > kMaxObjectSizeInNewSpace) {
3426 // Allocate in large object space, retry space will be ignored.
3427 space = LO_SPACE;
3428 } else if (size > MaxObjectSizeInPagedSpace()) {
3429 // Allocate in new space, retry in large object space.
3430 retry_space = LO_SPACE;
3431 }
3432 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3433 space = LO_SPACE;
3434 }
John Reck59135872010-11-02 12:39:01 -07003435 Object* result;
3436 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3437 if (!maybe_result->ToObject(&result)) return maybe_result;
3438 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003439
Steve Blocka7e24c12009-10-30 11:49:00 +00003440 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003441 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003442 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003443 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003444 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3445 return result;
3446}
3447
3448
John Reck59135872010-11-02 12:39:01 -07003449MaybeObject* Heap::AllocateEmptyFixedArray() {
Steve Blocka7e24c12009-10-30 11:49:00 +00003450 int size = FixedArray::SizeFor(0);
John Reck59135872010-11-02 12:39:01 -07003451 Object* result;
3452 { MaybeObject* maybe_result =
3453 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3454 if (!maybe_result->ToObject(&result)) return maybe_result;
3455 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003456 // Initialize the object.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003457 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3458 reinterpret_cast<FixedArray*>(result)->set_length(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003459 return result;
3460}
3461
3462
John Reck59135872010-11-02 12:39:01 -07003463MaybeObject* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00003464 if (length < 0 || length > FixedArray::kMaxLength) {
3465 return Failure::OutOfMemoryException();
3466 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003467 ASSERT(length > 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003468 // Use the general function if we're forced to always allocate.
3469 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3470 // Allocate the raw data for a fixed array.
3471 int size = FixedArray::SizeFor(length);
3472 return size <= kMaxObjectSizeInNewSpace
3473 ? new_space_.AllocateRaw(size)
3474 : lo_space_->AllocateRawFixedArray(size);
3475}
3476
3477
John Reck59135872010-11-02 12:39:01 -07003478MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003479 int len = src->length();
John Reck59135872010-11-02 12:39:01 -07003480 Object* obj;
3481 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3482 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3483 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003484 if (Heap::InNewSpace(obj)) {
3485 HeapObject* dst = HeapObject::cast(obj);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003486 dst->set_map(map);
3487 CopyBlock(dst->address() + kPointerSize,
3488 src->address() + kPointerSize,
3489 FixedArray::SizeFor(len) - kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003490 return obj;
3491 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003492 HeapObject::cast(obj)->set_map(map);
Steve Blocka7e24c12009-10-30 11:49:00 +00003493 FixedArray* result = FixedArray::cast(obj);
3494 result->set_length(len);
Leon Clarke4515c472010-02-03 11:58:03 +00003495
Steve Blocka7e24c12009-10-30 11:49:00 +00003496 // Copy the content
Leon Clarke4515c472010-02-03 11:58:03 +00003497 AssertNoAllocation no_gc;
3498 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00003499 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3500 return result;
3501}
3502
3503
John Reck59135872010-11-02 12:39:01 -07003504MaybeObject* Heap::AllocateFixedArray(int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003505 ASSERT(length >= 0);
3506 if (length == 0) return empty_fixed_array();
John Reck59135872010-11-02 12:39:01 -07003507 Object* result;
3508 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
3509 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003510 }
John Reck59135872010-11-02 12:39:01 -07003511 // Initialize header.
3512 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3513 array->set_map(fixed_array_map());
3514 array->set_length(length);
3515 // Initialize body.
3516 ASSERT(!Heap::InNewSpace(undefined_value()));
3517 MemsetPointer(array->data_start(), undefined_value(), length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003518 return result;
3519}
3520
3521
John Reck59135872010-11-02 12:39:01 -07003522MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003523 if (length < 0 || length > FixedArray::kMaxLength) {
3524 return Failure::OutOfMemoryException();
3525 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003526
Leon Clarkee46be812010-01-19 14:06:41 +00003527 AllocationSpace space =
3528 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003529 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003530 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3531 // Too big for new space.
3532 space = LO_SPACE;
3533 } else if (space == OLD_POINTER_SPACE &&
3534 size > MaxObjectSizeInPagedSpace()) {
3535 // Too big for old pointer space.
3536 space = LO_SPACE;
3537 }
3538
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003539 AllocationSpace retry_space =
3540 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3541
3542 return AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003543}
3544
3545
John Reck59135872010-11-02 12:39:01 -07003546MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
3547 int length,
3548 PretenureFlag pretenure,
3549 Object* filler) {
Steve Block6ded16b2010-05-10 14:33:55 +01003550 ASSERT(length >= 0);
3551 ASSERT(Heap::empty_fixed_array()->IsFixedArray());
3552 if (length == 0) return Heap::empty_fixed_array();
3553
3554 ASSERT(!Heap::InNewSpace(filler));
John Reck59135872010-11-02 12:39:01 -07003555 Object* result;
3556 { MaybeObject* maybe_result = Heap::AllocateRawFixedArray(length, pretenure);
3557 if (!maybe_result->ToObject(&result)) return maybe_result;
3558 }
Steve Block6ded16b2010-05-10 14:33:55 +01003559
3560 HeapObject::cast(result)->set_map(Heap::fixed_array_map());
3561 FixedArray* array = FixedArray::cast(result);
3562 array->set_length(length);
3563 MemsetPointer(array->data_start(), filler, length);
3564 return array;
3565}
3566
3567
John Reck59135872010-11-02 12:39:01 -07003568MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01003569 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
3570}
3571
3572
John Reck59135872010-11-02 12:39:01 -07003573MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
3574 PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01003575 return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
3576}
3577
3578
John Reck59135872010-11-02 12:39:01 -07003579MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
Steve Block6ded16b2010-05-10 14:33:55 +01003580 if (length == 0) return empty_fixed_array();
3581
John Reck59135872010-11-02 12:39:01 -07003582 Object* obj;
3583 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
3584 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3585 }
Steve Block6ded16b2010-05-10 14:33:55 +01003586
3587 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3588 FixedArray::cast(obj)->set_length(length);
3589 return obj;
3590}
3591
3592
John Reck59135872010-11-02 12:39:01 -07003593MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3594 Object* result;
3595 { MaybeObject* maybe_result = Heap::AllocateFixedArray(length, pretenure);
3596 if (!maybe_result->ToObject(&result)) return maybe_result;
3597 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003598 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003599 ASSERT(result->IsHashTable());
3600 return result;
3601}
3602
3603
John Reck59135872010-11-02 12:39:01 -07003604MaybeObject* Heap::AllocateGlobalContext() {
3605 Object* result;
3606 { MaybeObject* maybe_result =
3607 Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3608 if (!maybe_result->ToObject(&result)) return maybe_result;
3609 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003610 Context* context = reinterpret_cast<Context*>(result);
3611 context->set_map(global_context_map());
3612 ASSERT(context->IsGlobalContext());
3613 ASSERT(result->IsContext());
3614 return result;
3615}
3616
3617
John Reck59135872010-11-02 12:39:01 -07003618MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003619 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003620 Object* result;
3621 { MaybeObject* maybe_result = Heap::AllocateFixedArray(length);
3622 if (!maybe_result->ToObject(&result)) return maybe_result;
3623 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003624 Context* context = reinterpret_cast<Context*>(result);
3625 context->set_map(context_map());
3626 context->set_closure(function);
3627 context->set_fcontext(context);
3628 context->set_previous(NULL);
3629 context->set_extension(NULL);
3630 context->set_global(function->context()->global());
3631 ASSERT(!context->IsGlobalContext());
3632 ASSERT(context->is_function_context());
3633 ASSERT(result->IsContext());
3634 return result;
3635}
3636
3637
John Reck59135872010-11-02 12:39:01 -07003638MaybeObject* Heap::AllocateWithContext(Context* previous,
3639 JSObject* extension,
3640 bool is_catch_context) {
3641 Object* result;
3642 { MaybeObject* maybe_result =
3643 Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
3644 if (!maybe_result->ToObject(&result)) return maybe_result;
3645 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003646 Context* context = reinterpret_cast<Context*>(result);
3647 context->set_map(is_catch_context ? catch_context_map() : context_map());
3648 context->set_closure(previous->closure());
3649 context->set_fcontext(previous->fcontext());
3650 context->set_previous(previous);
3651 context->set_extension(extension);
3652 context->set_global(previous->global());
3653 ASSERT(!context->IsGlobalContext());
3654 ASSERT(!context->is_function_context());
3655 ASSERT(result->IsContext());
3656 return result;
3657}
3658
3659
John Reck59135872010-11-02 12:39:01 -07003660MaybeObject* Heap::AllocateStruct(InstanceType type) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003661 Map* map;
3662 switch (type) {
3663#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
3664STRUCT_LIST(MAKE_CASE)
3665#undef MAKE_CASE
3666 default:
3667 UNREACHABLE();
3668 return Failure::InternalError();
3669 }
3670 int size = map->instance_size();
3671 AllocationSpace space =
3672 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
John Reck59135872010-11-02 12:39:01 -07003673 Object* result;
3674 { MaybeObject* maybe_result = Heap::Allocate(map, space);
3675 if (!maybe_result->ToObject(&result)) return maybe_result;
3676 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003677 Struct::cast(result)->InitializeBody(size);
3678 return result;
3679}
3680
3681
3682bool Heap::IdleNotification() {
3683 static const int kIdlesBeforeScavenge = 4;
3684 static const int kIdlesBeforeMarkSweep = 7;
3685 static const int kIdlesBeforeMarkCompact = 8;
3686 static int number_idle_notifications = 0;
3687 static int last_gc_count = gc_count_;
3688
Steve Block6ded16b2010-05-10 14:33:55 +01003689 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003690 bool finished = false;
3691
3692 if (last_gc_count == gc_count_) {
3693 number_idle_notifications++;
3694 } else {
3695 number_idle_notifications = 0;
3696 last_gc_count = gc_count_;
3697 }
3698
3699 if (number_idle_notifications == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003700 if (contexts_disposed_ > 0) {
3701 HistogramTimerScope scope(&Counters::gc_context);
3702 CollectAllGarbage(false);
3703 } else {
Ben Murdochf87a2032010-10-22 12:50:53 +01003704 CollectGarbage(NEW_SPACE);
Steve Block6ded16b2010-05-10 14:33:55 +01003705 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003706 new_space_.Shrink();
3707 last_gc_count = gc_count_;
3708
3709 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003710 // Before doing the mark-sweep collections we clear the
3711 // compilation cache to avoid hanging on to source code and
3712 // generated code for cached functions.
3713 CompilationCache::Clear();
3714
Steve Blocka7e24c12009-10-30 11:49:00 +00003715 CollectAllGarbage(false);
3716 new_space_.Shrink();
3717 last_gc_count = gc_count_;
3718
3719 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
3720 CollectAllGarbage(true);
3721 new_space_.Shrink();
3722 last_gc_count = gc_count_;
3723 number_idle_notifications = 0;
3724 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003725
3726 } else if (contexts_disposed_ > 0) {
3727 if (FLAG_expose_gc) {
3728 contexts_disposed_ = 0;
3729 } else {
3730 HistogramTimerScope scope(&Counters::gc_context);
3731 CollectAllGarbage(false);
3732 last_gc_count = gc_count_;
3733 }
3734 // If this is the first idle notification, we reset the
3735 // notification count to avoid letting idle notifications for
3736 // context disposal garbage collections start a potentially too
3737 // aggressive idle GC cycle.
3738 if (number_idle_notifications <= 1) {
3739 number_idle_notifications = 0;
3740 uncommit = false;
3741 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003742 }
3743
Steve Block6ded16b2010-05-10 14:33:55 +01003744 // Make sure that we have no pending context disposals and
3745 // conditionally uncommit from space.
3746 ASSERT(contexts_disposed_ == 0);
3747 if (uncommit) Heap::UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003748 return finished;
3749}
3750
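// Summary added for exposition -- the escalation schedule encoded by the
// constants in IdleNotification() above, assuming no intervening GC resets
// the counter:
//
//   4th consecutive idle round -> scavenge (or a full GC if contexts were
//                                 disposed), then shrink new space
//   7th consecutive idle round -> clear the compilation cache + mark-sweep
//   8th consecutive idle round -> mark-compact, report `finished`, reset
//
// From-space is uncommitted at the end of every notification except when the
// very first one is consumed by a context-disposal GC.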
3751
3752#ifdef DEBUG
3753
3754void Heap::Print() {
3755 if (!HasBeenSetup()) return;
3756 Top::PrintStack();
3757 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003758 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3759 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003760}
3761
3762
3763void Heap::ReportCodeStatistics(const char* title) {
3764 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3765 PagedSpace::ResetCodeStatistics();
3766 // We do not look for code in new space, map space, or old space. If code
3767 // somehow ends up in those spaces, we would miss it here.
3768 code_space_->CollectCodeStatistics();
3769 lo_space_->CollectCodeStatistics();
3770 PagedSpace::ReportCodeStatistics();
3771}
3772
3773
3774// This function expects that NewSpace's allocated objects histogram is
3775// populated (via a call to CollectStatistics or else as a side effect of a
3776// just-completed scavenge collection).
3777void Heap::ReportHeapStatistics(const char* title) {
3778 USE(title);
3779 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3780 title, gc_count_);
3781 PrintF("mark-compact GC : %d\n", mc_count_);
Ben Murdochf87a2032010-10-22 12:50:53 +01003782 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
3783 old_gen_promotion_limit_);
3784 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
3785 old_gen_allocation_limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003786
3787 PrintF("\n");
3788 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
3789 GlobalHandles::PrintStats();
3790 PrintF("\n");
3791
3792 PrintF("Heap statistics : ");
3793 MemoryAllocator::ReportStatistics();
3794 PrintF("To space : ");
3795 new_space_.ReportStatistics();
3796 PrintF("Old pointer space : ");
3797 old_pointer_space_->ReportStatistics();
3798 PrintF("Old data space : ");
3799 old_data_space_->ReportStatistics();
3800 PrintF("Code space : ");
3801 code_space_->ReportStatistics();
3802 PrintF("Map space : ");
3803 map_space_->ReportStatistics();
3804 PrintF("Cell space : ");
3805 cell_space_->ReportStatistics();
3806 PrintF("Large object space : ");
3807 lo_space_->ReportStatistics();
3808 PrintF(">>>>>> ========================================= >>>>>>\n");
3809}
3810
3811#endif // DEBUG
3812
3813bool Heap::Contains(HeapObject* value) {
3814 return Contains(value->address());
3815}
3816
3817
3818bool Heap::Contains(Address addr) {
3819 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3820 return HasBeenSetup() &&
3821 (new_space_.ToSpaceContains(addr) ||
3822 old_pointer_space_->Contains(addr) ||
3823 old_data_space_->Contains(addr) ||
3824 code_space_->Contains(addr) ||
3825 map_space_->Contains(addr) ||
3826 cell_space_->Contains(addr) ||
3827 lo_space_->SlowContains(addr));
3828}
3829
3830
3831bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3832 return InSpace(value->address(), space);
3833}
3834
3835
3836bool Heap::InSpace(Address addr, AllocationSpace space) {
3837 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3838 if (!HasBeenSetup()) return false;
3839
3840 switch (space) {
3841 case NEW_SPACE:
3842 return new_space_.ToSpaceContains(addr);
3843 case OLD_POINTER_SPACE:
3844 return old_pointer_space_->Contains(addr);
3845 case OLD_DATA_SPACE:
3846 return old_data_space_->Contains(addr);
3847 case CODE_SPACE:
3848 return code_space_->Contains(addr);
3849 case MAP_SPACE:
3850 return map_space_->Contains(addr);
3851 case CELL_SPACE:
3852 return cell_space_->Contains(addr);
3853 case LO_SPACE:
3854 return lo_space_->SlowContains(addr);
3855 }
3856
3857 return false;
3858}
3859
3860
3861#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003862static void DummyScavengePointer(HeapObject** p) {
3863}
3864
3865
3866static void VerifyPointersUnderWatermark(
3867 PagedSpace* space,
3868 DirtyRegionCallback visit_dirty_region) {
3869 PageIterator it(space, PageIterator::PAGES_IN_USE);
3870
3871 while (it.has_next()) {
3872 Page* page = it.next();
3873 Address start = page->ObjectAreaStart();
3874 Address end = page->AllocationWatermark();
3875
3876 Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
3877 start,
3878 end,
3879 visit_dirty_region,
3880 &DummyScavengePointer);
3881 }
3882}
3883
3884
3885static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
3886 LargeObjectIterator it(space);
3887 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
3888 if (object->IsFixedArray()) {
3889 Address slot_address = object->address();
3890 Address end = object->address() + object->Size();
3891
3892 while (slot_address < end) {
3893 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
3894         // When we are not in GC, the Heap::InNewSpace() predicate
3895         // asserts that any pointer it classifies as a new-space pointer
3896         // actually points into the active semispace.
3897 Heap::InNewSpace(*slot);
3898 slot_address += kPointerSize;
3899 }
3900 }
3901 }
3902}
3903
3904
Steve Blocka7e24c12009-10-30 11:49:00 +00003905void Heap::Verify() {
3906 ASSERT(HasBeenSetup());
3907
3908 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00003909 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00003910
3911 new_space_.Verify();
3912
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003913 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
3914 old_pointer_space_->Verify(&dirty_regions_visitor);
3915 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00003916
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003917 VerifyPointersUnderWatermark(old_pointer_space_,
3918 &IteratePointersInDirtyRegion);
3919 VerifyPointersUnderWatermark(map_space_,
3920 &IteratePointersInDirtyMapsRegion);
3921 VerifyPointersUnderWatermark(lo_space_);
3922
3923 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
3924 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
3925
3926 VerifyPointersVisitor no_dirty_regions_visitor;
3927 old_data_space_->Verify(&no_dirty_regions_visitor);
3928 code_space_->Verify(&no_dirty_regions_visitor);
3929 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00003930
3931 lo_space_->Verify();
3932}
3933#endif // DEBUG
3934
3935
John Reck59135872010-11-02 12:39:01 -07003936MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003937 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07003938 Object* new_table;
3939 { MaybeObject* maybe_new_table =
3940 symbol_table()->LookupSymbol(string, &symbol);
3941 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
3942 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003943 // Can't use set_symbol_table because SymbolTable::cast knows that
3944 // SymbolTable is a singleton and checks for identity.
3945 roots_[kSymbolTableRootIndex] = new_table;
3946 ASSERT(symbol != NULL);
3947 return symbol;
3948}
3949
3950
John Reck59135872010-11-02 12:39:01 -07003951MaybeObject* Heap::LookupSymbol(String* string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003952 if (string->IsSymbol()) return string;
3953 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07003954 Object* new_table;
3955 { MaybeObject* maybe_new_table =
3956 symbol_table()->LookupString(string, &symbol);
3957 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
3958 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003959 // Can't use set_symbol_table because SymbolTable::cast knows that
3960 // SymbolTable is a singleton and checks for identity.
3961 roots_[kSymbolTableRootIndex] = new_table;
3962 ASSERT(symbol != NULL);
3963 return symbol;
3964}
3965
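// Usage sketch (illustrative only): interning a C string as a symbol via the
// Vector overload above.  CStrVector is assumed to be the utils.h helper that
// wraps a NUL-terminated string:
//
//   Object* obj;
//   { MaybeObject* maybe = Heap::LookupSymbol(CStrVector("prototype"));
//     if (!maybe->ToObject(&obj)) return maybe;
//   }
//   String* symbol = String::cast(obj);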
3966
3967bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
3968 if (string->IsSymbol()) {
3969 *symbol = string;
3970 return true;
3971 }
3972 return symbol_table()->LookupSymbolIfExists(string, symbol);
3973}
3974
3975
3976#ifdef DEBUG
3977void Heap::ZapFromSpace() {
3978 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
3979 for (Address a = new_space_.FromSpaceLow();
3980 a < new_space_.FromSpaceHigh();
3981 a += kPointerSize) {
3982 Memory::Address_at(a) = kFromSpaceZapValue;
3983 }
3984}
3985#endif // DEBUG
3986
3987
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003988bool Heap::IteratePointersInDirtyRegion(Address start,
3989 Address end,
3990 ObjectSlotCallback copy_object_func) {
3991 Address slot_address = start;
3992 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00003993
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003994 while (slot_address < end) {
3995 Object** slot = reinterpret_cast<Object**>(slot_address);
3996 if (Heap::InNewSpace(*slot)) {
3997 ASSERT((*slot)->IsHeapObject());
3998 copy_object_func(reinterpret_cast<HeapObject**>(slot));
3999 if (Heap::InNewSpace(*slot)) {
4000 ASSERT((*slot)->IsHeapObject());
4001 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004002 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004003 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004004 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00004005 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004006 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00004007}
4008
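// Note added for exposition: copy_object_func above may relocate the object a
// slot refers to (scavenging), so the slot is re-tested with InNewSpace()
// after the callback; only slots that still point into new space cause the
// enclosing region to stay marked dirty.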
4009
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004010// Compute start address of the first map following given addr.
4011static inline Address MapStartAlign(Address addr) {
4012 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4013 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4014}
Steve Blocka7e24c12009-10-30 11:49:00 +00004015
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004016
4017// Compute end address of the first map preceding given addr.
4018static inline Address MapEndAlign(Address addr) {
4019 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4020 return page + ((addr - page) / Map::kSize * Map::kSize);
4021}
4022
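// Worked example (added for illustration; S stands for Map::kSize): for an
// address addr == page + S + 8, MapStartAlign(addr) rounds up to page + 2*S
// and MapEndAlign(addr) rounds down to page + S, so the two helpers bracket
// the run of whole maps that lies inside a dirty region.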
4023
4024static bool IteratePointersInDirtyMaps(Address start,
4025 Address end,
4026 ObjectSlotCallback copy_object_func) {
4027 ASSERT(MapStartAlign(start) == start);
4028 ASSERT(MapEndAlign(end) == end);
4029
4030 Address map_address = start;
4031 bool pointers_to_new_space_found = false;
4032
4033 while (map_address < end) {
4034 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
4035 ASSERT(Memory::Object_at(map_address)->IsMap());
4036
4037 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4038 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4039
4040 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
4041 pointer_fields_end,
4042 copy_object_func)) {
4043 pointers_to_new_space_found = true;
4044 }
4045
4046 map_address += Map::kSize;
4047 }
4048
4049 return pointers_to_new_space_found;
4050}
4051
4052
4053bool Heap::IteratePointersInDirtyMapsRegion(
4054 Address start,
4055 Address end,
4056 ObjectSlotCallback copy_object_func) {
4057 Address map_aligned_start = MapStartAlign(start);
4058 Address map_aligned_end = MapEndAlign(end);
4059
4060 bool contains_pointers_to_new_space = false;
4061
4062 if (map_aligned_start != start) {
4063 Address prev_map = map_aligned_start - Map::kSize;
4064 ASSERT(Memory::Object_at(prev_map)->IsMap());
4065
4066 Address pointer_fields_start =
4067 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4068
4069 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004070 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004071
4072 contains_pointers_to_new_space =
4073 IteratePointersInDirtyRegion(pointer_fields_start,
4074 pointer_fields_end,
4075 copy_object_func)
4076 || contains_pointers_to_new_space;
4077 }
4078
4079 contains_pointers_to_new_space =
4080 IteratePointersInDirtyMaps(map_aligned_start,
4081 map_aligned_end,
4082 copy_object_func)
4083 || contains_pointers_to_new_space;
4084
4085 if (map_aligned_end != end) {
4086 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4087
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004088 Address pointer_fields_start =
4089 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004090
4091 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004092 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004093
4094 contains_pointers_to_new_space =
4095 IteratePointersInDirtyRegion(pointer_fields_start,
4096 pointer_fields_end,
4097 copy_object_func)
4098 || contains_pointers_to_new_space;
4099 }
4100
4101 return contains_pointers_to_new_space;
4102}
4103
4104
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004105void Heap::IterateAndMarkPointersToFromSpace(Address start,
4106 Address end,
4107 ObjectSlotCallback callback) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004108 Address slot_address = start;
4109 Page* page = Page::FromAddress(start);
4110
4111 uint32_t marks = page->GetRegionMarks();
4112
4113 while (slot_address < end) {
4114 Object** slot = reinterpret_cast<Object**>(slot_address);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004115 if (Heap::InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004116 ASSERT((*slot)->IsHeapObject());
4117 callback(reinterpret_cast<HeapObject**>(slot));
4118 if (Heap::InNewSpace(*slot)) {
4119 ASSERT((*slot)->IsHeapObject());
4120 marks |= page->GetRegionMaskForAddress(slot_address);
4121 }
4122 }
4123 slot_address += kPointerSize;
4124 }
4125
4126 page->SetRegionMarks(marks);
4127}
4128
4129
4130uint32_t Heap::IterateDirtyRegions(
4131 uint32_t marks,
4132 Address area_start,
4133 Address area_end,
4134 DirtyRegionCallback visit_dirty_region,
4135 ObjectSlotCallback copy_object_func) {
4136 uint32_t newmarks = 0;
4137 uint32_t mask = 1;
4138
4139 if (area_start >= area_end) {
4140 return newmarks;
4141 }
4142
4143 Address region_start = area_start;
4144
4145   // area_start does not necessarily coincide with the start of the first
4146   // region. Thus, to find the beginning of the next region, we round
4147   // area_start up to a Page::kRegionSize boundary.
4148 Address second_region =
4149 reinterpret_cast<Address>(
4150 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4151 ~Page::kRegionAlignmentMask);
4152
4153 // Next region might be beyond area_end.
4154 Address region_end = Min(second_region, area_end);
4155
4156 if (marks & mask) {
4157 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
4158 newmarks |= mask;
4159 }
4160 }
4161 mask <<= 1;
4162
4163   // Iterate subsequent regions which lie fully inside [area_start, area_end).
4164 region_start = region_end;
4165 region_end = region_start + Page::kRegionSize;
4166
4167 while (region_end <= area_end) {
4168 if (marks & mask) {
4169 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
4170 newmarks |= mask;
4171 }
4172 }
4173
4174 region_start = region_end;
4175 region_end = region_start + Page::kRegionSize;
4176
4177 mask <<= 1;
4178 }
4179
4180 if (region_start != area_end) {
4181     // A small piece of the area was left unvisited because area_end does
4182     // not coincide with a region end. Check whether the region covering
4183     // the last part of the area is dirty.
4184 if (marks & mask) {
4185 if (visit_dirty_region(region_start, area_end, copy_object_func)) {
4186 newmarks |= mask;
4187 }
4188 }
4189 }
4190
4191 return newmarks;
4192}
4193
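// Diagram added for exposition -- how the mask walk above maps regions onto
// bits (one dirty bit per Page::kRegionSize-aligned region, tracked in a
// uint32_t per page):
//
//   area     [area_start ............................... area_end)
//   regions  |  partial  |   full   |   full   |  partial |
//   mask         1 << 0      1 << 1     1 << 2     1 << 3
//
// A bit remains set in the returned marks only if visit_dirty_region found a
// surviving pointer into new space in the corresponding region.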
4194
4195
4196void Heap::IterateDirtyRegions(
4197 PagedSpace* space,
4198 DirtyRegionCallback visit_dirty_region,
4199 ObjectSlotCallback copy_object_func,
4200 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004201
4202 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004203
Steve Blocka7e24c12009-10-30 11:49:00 +00004204 while (it.has_next()) {
4205 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004206 uint32_t marks = page->GetRegionMarks();
4207
4208 if (marks != Page::kAllRegionsCleanMarks) {
4209 Address start = page->ObjectAreaStart();
4210
4211       // Do not try to visit pointers beyond the page's allocation
4212       // watermark. The page can contain garbage pointers there.
4213 Address end;
4214
4215 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4216 page->IsWatermarkValid()) {
4217 end = page->AllocationWatermark();
4218 } else {
4219 end = page->CachedAllocationWatermark();
4220 }
4221
4222 ASSERT(space == old_pointer_space_ ||
4223 (space == map_space_ &&
4224 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4225
4226 page->SetRegionMarks(IterateDirtyRegions(marks,
4227 start,
4228 end,
4229 visit_dirty_region,
4230 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00004231 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004232
4233 // Mark page watermark as invalid to maintain watermark validity invariant.
4234 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4235 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00004236 }
4237}
4238
4239
Steve Blockd0582a62009-12-15 09:54:21 +00004240void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4241 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00004242 IterateWeakRoots(v, mode);
4243}
4244
4245
4246void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004247 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00004248 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00004249 if (mode != VISIT_ALL_IN_SCAVENGE) {
4250 // Scavenge collections have special processing for this.
4251 ExternalStringTable::Iterate(v);
4252 }
4253 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00004254}
4255
4256
Steve Blockd0582a62009-12-15 09:54:21 +00004257void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004258 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004259 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004260
Iain Merrick75681382010-08-19 15:07:18 +01004261 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004262 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004263
4264 Bootstrapper::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004265 v->Synchronize("bootstrapper");
Steve Blocka7e24c12009-10-30 11:49:00 +00004266 Top::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004267 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004268 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004269 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004270
4271#ifdef ENABLE_DEBUGGER_SUPPORT
4272 Debug::Iterate(v);
4273#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004274 v->Synchronize("debug");
Steve Blocka7e24c12009-10-30 11:49:00 +00004275 CompilationCache::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004276 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004277
4278 // Iterate over local handles in handle scopes.
4279 HandleScopeImplementer::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004280 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004281
Leon Clarkee46be812010-01-19 14:06:41 +00004282 // Iterate over the builtin code objects and code stubs in the
4283 // heap. Note that it is not necessary to iterate over code objects
4284 // on scavenge collections.
4285 if (mode != VISIT_ALL_IN_SCAVENGE) {
4286 Builtins::IterateBuiltins(v);
4287 }
Steve Blockd0582a62009-12-15 09:54:21 +00004288 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004289
4290 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004291 if (mode == VISIT_ONLY_STRONG) {
4292 GlobalHandles::IterateStrongRoots(v);
4293 } else {
4294 GlobalHandles::IterateAllRoots(v);
4295 }
4296 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004297
4298 // Iterate over pointers being held by inactive threads.
4299 ThreadManager::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004300 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004301
4302 // Iterate over the pointers the Serialization/Deserialization code is
4303 // holding.
4304 // During garbage collection this keeps the partial snapshot cache alive.
4305 // During deserialization of the startup snapshot this creates the partial
4306 // snapshot cache and deserializes the objects it refers to. During
4307 // serialization this does nothing, since the partial snapshot cache is
4308 // empty. However the next thing we do is create the partial snapshot,
4309 // filling up the partial snapshot cache with objects it needs as we go.
4310 SerializerDeserializer::Iterate(v);
4311 // We don't do a v->Synchronize call here, because in debug mode that will
4312 // output a flag to the snapshot. However at this point the serializer and
4313 // deserializer are deliberately a little unsynchronized (see above) so the
4314 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004315}
Steve Blocka7e24c12009-10-30 11:49:00 +00004316
4317
4318// Flag is set when the heap has been configured. The heap can be repeatedly
4319// configured through the API until it is set up.
4320static bool heap_configured = false;
4321
4322// TODO(1236194): Since the heap size is configurable on the command line
4323// and through the API, we should gracefully handle the case that the heap
4324// size is not big enough to fit all the initial objects.
Russell Brenner90bac252010-11-18 13:33:46 -08004325bool Heap::ConfigureHeap(int max_semispace_size,
4326 int max_old_gen_size,
4327 int max_executable_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004328 if (HasBeenSetup()) return false;
4329
Steve Block3ce2e202009-11-05 08:53:23 +00004330 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4331
4332 if (Snapshot::IsEnabled()) {
4333 // If we are using a snapshot we always reserve the default amount
4334 // of memory for each semispace because code in the snapshot has
4335 // write-barrier code that relies on the size and alignment of new
4336 // space. We therefore cannot use a larger max semispace size
4337 // than the default reserved semispace size.
4338 if (max_semispace_size_ > reserved_semispace_size_) {
4339 max_semispace_size_ = reserved_semispace_size_;
4340 }
4341 } else {
4342 // If we are not using snapshots we reserve space for the actual
4343 // max semispace size.
4344 reserved_semispace_size_ = max_semispace_size_;
4345 }
4346
4347 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Russell Brenner90bac252010-11-18 13:33:46 -08004348 if (max_executable_size > 0) {
4349 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
4350 }
4351
4352 // The max executable size must be less than or equal to the max old
4353 // generation size.
4354 if (max_executable_size_ > max_old_generation_size_) {
4355 max_executable_size_ = max_old_generation_size_;
4356 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004357
4358 // The new space size must be a power of two to support single-bit testing
4359 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00004360 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4361 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4362 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4363 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00004364
4365 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004366 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004367
4368 heap_configured = true;
4369 return true;
4370}
4371
4372
4373bool Heap::ConfigureHeapDefault() {
Russell Brenner90bac252010-11-18 13:33:46 -08004374 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
4375 FLAG_max_old_space_size * MB,
4376 FLAG_max_executable_size * MB);
Steve Blocka7e24c12009-10-30 11:49:00 +00004377}
4378
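// Hedged sketch of how an embedder-specified configuration reaches this point
// (illustrative; the public entry point is assumed to be
// v8::SetResourceConstraints in api.cc, which forwards to ConfigureHeap):
//
//   Heap::ConfigureHeap(512 * KB,   // max semispace size
//                       128 * MB,   // max old generation size
//                       64 * MB);   // max executable size
//
// ConfigureHeap() then rounds the semispace size up to a power of two and the
// old-generation and executable limits up to Page::kPageSize.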
4379
Ben Murdochbb769b22010-08-11 14:56:33 +01004380void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Iain Merrick75681382010-08-19 15:07:18 +01004381 *stats->start_marker = HeapStats::kStartMarker;
4382 *stats->end_marker = HeapStats::kEndMarker;
Ben Murdochf87a2032010-10-22 12:50:53 +01004383 *stats->new_space_size = new_space_.SizeAsInt();
4384 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Steve Blockd0582a62009-12-15 09:54:21 +00004385 *stats->old_pointer_space_size = old_pointer_space_->Size();
4386 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4387 *stats->old_data_space_size = old_data_space_->Size();
4388 *stats->old_data_space_capacity = old_data_space_->Capacity();
4389 *stats->code_space_size = code_space_->Size();
4390 *stats->code_space_capacity = code_space_->Capacity();
4391 *stats->map_space_size = map_space_->Size();
4392 *stats->map_space_capacity = map_space_->Capacity();
4393 *stats->cell_space_size = cell_space_->Size();
4394 *stats->cell_space_capacity = cell_space_->Capacity();
4395 *stats->lo_space_size = lo_space_->Size();
4396 GlobalHandles::RecordStats(stats);
Ben Murdochbb769b22010-08-11 14:56:33 +01004397 *stats->memory_allocator_size = MemoryAllocator::Size();
4398 *stats->memory_allocator_capacity =
4399 MemoryAllocator::Size() + MemoryAllocator::Available();
Iain Merrick75681382010-08-19 15:07:18 +01004400 *stats->os_error = OS::GetLastError();
Ben Murdochbb769b22010-08-11 14:56:33 +01004401 if (take_snapshot) {
4402 HeapIterator iterator;
4403 for (HeapObject* obj = iterator.next();
4404 obj != NULL;
4405 obj = iterator.next()) {
4406 // Note: snapshot won't be precise because IsFreeListNode returns true
4407       // for any byte array.
4408 if (FreeListNode::IsFreeListNode(obj)) continue;
4409 InstanceType type = obj->map()->instance_type();
4410 ASSERT(0 <= type && type <= LAST_TYPE);
4411 stats->objects_per_type[type]++;
4412 stats->size_per_type[type] += obj->Size();
4413 }
4414 }
Steve Blockd0582a62009-12-15 09:54:21 +00004415}
4416
4417
Ben Murdochf87a2032010-10-22 12:50:53 +01004418intptr_t Heap::PromotedSpaceSize() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004419 return old_pointer_space_->Size()
4420 + old_data_space_->Size()
4421 + code_space_->Size()
4422 + map_space_->Size()
4423 + cell_space_->Size()
4424 + lo_space_->Size();
4425}
4426
4427
4428int Heap::PromotedExternalMemorySize() {
4429 if (amount_of_external_allocated_memory_
4430 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4431 return amount_of_external_allocated_memory_
4432 - amount_of_external_allocated_memory_at_last_global_gc_;
4433}
4434
4435
4436bool Heap::Setup(bool create_heap_objects) {
4437 // Initialize heap spaces and initial maps and objects. Whenever something
4438 // goes wrong, just return false. The caller should check the results and
4439 // call Heap::TearDown() to release allocated memory.
4440 //
4441  // If the heap is not yet configured (e.g., through the API), configure it.
4442 // Configuration is based on the flags new-space-size (really the semispace
4443 // size) and old-space-size if set or the initial values of semispace_size_
4444 // and old_generation_size_ otherwise.
4445 if (!heap_configured) {
4446 if (!ConfigureHeapDefault()) return false;
4447 }
4448
Iain Merrick75681382010-08-19 15:07:18 +01004449 ScavengingVisitor::Initialize();
4450 NewSpaceScavenger::Initialize();
4451 MarkCompactCollector::Initialize();
4452
Kristian Monsen80d68ea2010-09-08 11:05:35 +01004453 MarkMapPointersAsEncoded(false);
4454
Steve Blocka7e24c12009-10-30 11:49:00 +00004455  // Set up the memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00004456 // space. The chunk is double the size of the requested reserved
4457 // new space size to ensure that we can find a pair of semispaces that
4458 // are contiguous and aligned to their size.
Russell Brenner90bac252010-11-18 13:33:46 -08004459 if (!MemoryAllocator::Setup(MaxReserved(), MaxExecutableSize())) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004460 void* chunk =
Steve Block3ce2e202009-11-05 08:53:23 +00004461 MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004462 if (chunk == NULL) return false;
4463
4464 // Align the pair of semispaces to their size, which must be a power
4465 // of 2.
Steve Blocka7e24c12009-10-30 11:49:00 +00004466 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00004467 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4468 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4469 return false;
4470 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004471
4472 // Initialize old pointer space.
4473 old_pointer_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004474 new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004475 if (old_pointer_space_ == NULL) return false;
4476 if (!old_pointer_space_->Setup(NULL, 0)) return false;
4477
4478 // Initialize old data space.
4479 old_data_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004480 new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004481 if (old_data_space_ == NULL) return false;
4482 if (!old_data_space_->Setup(NULL, 0)) return false;
4483
4484 // Initialize the code space, set its maximum capacity to the old
4485 // generation size. It needs executable memory.
4486 // On 64-bit platform(s), we put all code objects in a 2 GB range of
4487 // virtual address space, so that they can call each other with near calls.
4488 if (code_range_size_ > 0) {
4489 if (!CodeRange::Setup(code_range_size_)) {
4490 return false;
4491 }
4492 }
4493
4494 code_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004495 new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004496 if (code_space_ == NULL) return false;
4497 if (!code_space_->Setup(NULL, 0)) return false;
4498
4499 // Initialize map space.
Leon Clarkee46be812010-01-19 14:06:41 +00004500 map_space_ = new MapSpace(FLAG_use_big_map_space
4501 ? max_old_generation_size_
Leon Clarked91b9f72010-01-27 17:25:45 +00004502 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
4503 FLAG_max_map_space_pages,
Leon Clarkee46be812010-01-19 14:06:41 +00004504 MAP_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004505 if (map_space_ == NULL) return false;
4506 if (!map_space_->Setup(NULL, 0)) return false;
4507
4508 // Initialize global property cell space.
Steve Block3ce2e202009-11-05 08:53:23 +00004509 cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004510 if (cell_space_ == NULL) return false;
4511 if (!cell_space_->Setup(NULL, 0)) return false;
4512
4513 // The large object code space may contain code or data. We set the memory
4514 // to be non-executable here for safety, but this means we need to enable it
4515 // explicitly when allocating large code objects.
4516 lo_space_ = new LargeObjectSpace(LO_SPACE);
4517 if (lo_space_ == NULL) return false;
4518 if (!lo_space_->Setup()) return false;
4519
4520 if (create_heap_objects) {
4521 // Create initial maps.
4522 if (!CreateInitialMaps()) return false;
4523 if (!CreateApiObjects()) return false;
4524
4525 // Create initial objects
4526 if (!CreateInitialObjects()) return false;
Ben Murdochf87a2032010-10-22 12:50:53 +01004527
4528 global_contexts_list_ = undefined_value();
Steve Blocka7e24c12009-10-30 11:49:00 +00004529 }
4530
Ben Murdochf87a2032010-10-22 12:50:53 +01004531 LOG(IntPtrTEvent("heap-capacity", Capacity()));
4532 LOG(IntPtrTEvent("heap-available", Available()));
Steve Blocka7e24c12009-10-30 11:49:00 +00004533
Steve Block3ce2e202009-11-05 08:53:23 +00004534#ifdef ENABLE_LOGGING_AND_PROFILING
4535 // This should be called only after initial objects have been created.
4536 ProducerHeapProfile::Setup();
4537#endif
4538
Steve Blocka7e24c12009-10-30 11:49:00 +00004539 return true;
4540}
4541
4542
Steve Blockd0582a62009-12-15 09:54:21 +00004543void Heap::SetStackLimits() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004544  // On 64-bit machines, pointers are generally out of range of Smis. We write
4545  // something that looks like an out-of-range Smi to the GC.
4546
Steve Blockd0582a62009-12-15 09:54:21 +00004547 // Set up the special root array entries containing the stack limits.
4548  // These are actually addresses, but the tag makes the GC ignore them.
Steve Blocka7e24c12009-10-30 11:49:00 +00004549 roots_[kStackLimitRootIndex] =
Steve Blockd0582a62009-12-15 09:54:21 +00004550 reinterpret_cast<Object*>(
4551 (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
4552 roots_[kRealStackLimitRootIndex] =
4553 reinterpret_cast<Object*>(
4554 (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blocka7e24c12009-10-30 11:49:00 +00004555}
4556
4557
4558void Heap::TearDown() {
Leon Clarkef7060e22010-06-03 12:02:55 +01004559 if (FLAG_print_cumulative_gc_stat) {
4560 PrintF("\n\n");
4561 PrintF("gc_count=%d ", gc_count_);
4562 PrintF("mark_sweep_count=%d ", ms_count_);
4563 PrintF("mark_compact_count=%d ", mc_count_);
4564 PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
4565 PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
Ben Murdochf87a2032010-10-22 12:50:53 +01004566 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
4567 GCTracer::get_max_alive_after_gc());
Leon Clarkef7060e22010-06-03 12:02:55 +01004568 PrintF("\n\n");
4569 }
4570
Steve Blocka7e24c12009-10-30 11:49:00 +00004571 GlobalHandles::TearDown();
4572
Leon Clarkee46be812010-01-19 14:06:41 +00004573 ExternalStringTable::TearDown();
4574
Steve Blocka7e24c12009-10-30 11:49:00 +00004575 new_space_.TearDown();
4576
4577 if (old_pointer_space_ != NULL) {
4578 old_pointer_space_->TearDown();
4579 delete old_pointer_space_;
4580 old_pointer_space_ = NULL;
4581 }
4582
4583 if (old_data_space_ != NULL) {
4584 old_data_space_->TearDown();
4585 delete old_data_space_;
4586 old_data_space_ = NULL;
4587 }
4588
4589 if (code_space_ != NULL) {
4590 code_space_->TearDown();
4591 delete code_space_;
4592 code_space_ = NULL;
4593 }
4594
4595 if (map_space_ != NULL) {
4596 map_space_->TearDown();
4597 delete map_space_;
4598 map_space_ = NULL;
4599 }
4600
4601 if (cell_space_ != NULL) {
4602 cell_space_->TearDown();
4603 delete cell_space_;
4604 cell_space_ = NULL;
4605 }
4606
4607 if (lo_space_ != NULL) {
4608 lo_space_->TearDown();
4609 delete lo_space_;
4610 lo_space_ = NULL;
4611 }
4612
4613 MemoryAllocator::TearDown();
4614}
4615
4616
4617void Heap::Shrink() {
4618 // Try to shrink all paged spaces.
4619 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004620 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
4621 space->Shrink();
Steve Blocka7e24c12009-10-30 11:49:00 +00004622}
4623
4624
4625#ifdef ENABLE_HEAP_PROTECTION
4626
4627void Heap::Protect() {
4628 if (HasBeenSetup()) {
4629 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004630 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4631 space->Protect();
Steve Blocka7e24c12009-10-30 11:49:00 +00004632 }
4633}
4634
4635
4636void Heap::Unprotect() {
4637 if (HasBeenSetup()) {
4638 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004639 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4640 space->Unprotect();
Steve Blocka7e24c12009-10-30 11:49:00 +00004641 }
4642}
4643
4644#endif
4645
4646
Steve Block6ded16b2010-05-10 14:33:55 +01004647void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
4648 ASSERT(callback != NULL);
4649 GCPrologueCallbackPair pair(callback, gc_type);
4650 ASSERT(!gc_prologue_callbacks_.Contains(pair));
4651 return gc_prologue_callbacks_.Add(pair);
4652}
4653
4654
4655void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
4656 ASSERT(callback != NULL);
4657 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
4658 if (gc_prologue_callbacks_[i].callback == callback) {
4659 gc_prologue_callbacks_.Remove(i);
4660 return;
4661 }
4662 }
4663 UNREACHABLE();
4664}
4665
4666
4667void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
4668 ASSERT(callback != NULL);
4669 GCEpilogueCallbackPair pair(callback, gc_type);
4670 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
4671 return gc_epilogue_callbacks_.Add(pair);
4672}
4673
4674
4675void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
4676 ASSERT(callback != NULL);
4677 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
4678 if (gc_epilogue_callbacks_[i].callback == callback) {
4679 gc_epilogue_callbacks_.Remove(i);
4680 return;
4681 }
4682 }
4683 UNREACHABLE();
4684}
4685
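// Usage sketch (illustrative only; the embedder-facing wrappers are assumed
// to live in api.cc as V8::AddGCPrologueCallback and friends):
//
//   static void OnMarkSweep(GCType type, GCCallbackFlags flags) {
//     // react to the start of a full collection
//   }
//   ...
//   Heap::AddGCPrologueCallback(OnMarkSweep, kGCTypeMarkSweepCompact);
//   ...
//   Heap::RemoveGCPrologueCallback(OnMarkSweep);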
4686
Steve Blocka7e24c12009-10-30 11:49:00 +00004687#ifdef DEBUG
4688
4689class PrintHandleVisitor: public ObjectVisitor {
4690 public:
4691 void VisitPointers(Object** start, Object** end) {
4692 for (Object** p = start; p < end; p++)
Ben Murdochf87a2032010-10-22 12:50:53 +01004693 PrintF(" handle %p to %p\n",
4694 reinterpret_cast<void*>(p),
4695 reinterpret_cast<void*>(*p));
Steve Blocka7e24c12009-10-30 11:49:00 +00004696 }
4697};
4698
4699void Heap::PrintHandles() {
4700 PrintF("Handles:\n");
4701 PrintHandleVisitor v;
4702 HandleScopeImplementer::Iterate(&v);
4703}
4704
4705#endif
4706
4707
4708Space* AllSpaces::next() {
4709 switch (counter_++) {
4710 case NEW_SPACE:
4711 return Heap::new_space();
4712 case OLD_POINTER_SPACE:
4713 return Heap::old_pointer_space();
4714 case OLD_DATA_SPACE:
4715 return Heap::old_data_space();
4716 case CODE_SPACE:
4717 return Heap::code_space();
4718 case MAP_SPACE:
4719 return Heap::map_space();
4720 case CELL_SPACE:
4721 return Heap::cell_space();
4722 case LO_SPACE:
4723 return Heap::lo_space();
4724 default:
4725 return NULL;
4726 }
4727}
4728
4729
4730PagedSpace* PagedSpaces::next() {
4731 switch (counter_++) {
4732 case OLD_POINTER_SPACE:
4733 return Heap::old_pointer_space();
4734 case OLD_DATA_SPACE:
4735 return Heap::old_data_space();
4736 case CODE_SPACE:
4737 return Heap::code_space();
4738 case MAP_SPACE:
4739 return Heap::map_space();
4740 case CELL_SPACE:
4741 return Heap::cell_space();
4742 default:
4743 return NULL;
4744 }
4745}
4746
4747
4748
4749OldSpace* OldSpaces::next() {
4750 switch (counter_++) {
4751 case OLD_POINTER_SPACE:
4752 return Heap::old_pointer_space();
4753 case OLD_DATA_SPACE:
4754 return Heap::old_data_space();
4755 case CODE_SPACE:
4756 return Heap::code_space();
4757 default:
4758 return NULL;
4759 }
4760}
4761
4762
4763SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
4764}
4765
4766
4767SpaceIterator::~SpaceIterator() {
4768 // Delete active iterator if any.
4769 delete iterator_;
4770}
4771
4772
4773bool SpaceIterator::has_next() {
4774 // Iterate until no more spaces.
4775 return current_space_ != LAST_SPACE;
4776}
4777
4778
4779ObjectIterator* SpaceIterator::next() {
4780 if (iterator_ != NULL) {
4781 delete iterator_;
4782 iterator_ = NULL;
4783 // Move to the next space
4784 current_space_++;
4785 if (current_space_ > LAST_SPACE) {
4786 return NULL;
4787 }
4788 }
4789
4790 // Return iterator for the new current space.
4791 return CreateIterator();
4792}
4793
4794
4795// Create an iterator for the space to iterate.
4796ObjectIterator* SpaceIterator::CreateIterator() {
4797 ASSERT(iterator_ == NULL);
4798
4799 switch (current_space_) {
4800 case NEW_SPACE:
4801 iterator_ = new SemiSpaceIterator(Heap::new_space());
4802 break;
4803 case OLD_POINTER_SPACE:
4804 iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
4805 break;
4806 case OLD_DATA_SPACE:
4807 iterator_ = new HeapObjectIterator(Heap::old_data_space());
4808 break;
4809 case CODE_SPACE:
4810 iterator_ = new HeapObjectIterator(Heap::code_space());
4811 break;
4812 case MAP_SPACE:
4813 iterator_ = new HeapObjectIterator(Heap::map_space());
4814 break;
4815 case CELL_SPACE:
4816 iterator_ = new HeapObjectIterator(Heap::cell_space());
4817 break;
4818 case LO_SPACE:
4819 iterator_ = new LargeObjectIterator(Heap::lo_space());
4820 break;
4821 }
4822
4823  // Return the newly allocated iterator.
4824 ASSERT(iterator_ != NULL);
4825 return iterator_;
4826}
4827
4828
4829HeapIterator::HeapIterator() {
4830 Init();
4831}
4832
4833
4834HeapIterator::~HeapIterator() {
4835 Shutdown();
4836}
4837
4838
4839void HeapIterator::Init() {
4840 // Start the iteration.
4841 space_iterator_ = new SpaceIterator();
4842 object_iterator_ = space_iterator_->next();
4843}
4844
4845
4846void HeapIterator::Shutdown() {
4847 // Make sure the last iterator is deallocated.
4848 delete space_iterator_;
4849 space_iterator_ = NULL;
4850 object_iterator_ = NULL;
4851}
4852
4853
Leon Clarked91b9f72010-01-27 17:25:45 +00004854HeapObject* HeapIterator::next() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004855 // No iterator means we are done.
Leon Clarked91b9f72010-01-27 17:25:45 +00004856 if (object_iterator_ == NULL) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00004857
Leon Clarked91b9f72010-01-27 17:25:45 +00004858 if (HeapObject* obj = object_iterator_->next_object()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004859 // If the current iterator has more objects we are fine.
Leon Clarked91b9f72010-01-27 17:25:45 +00004860 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00004861 } else {
4862    // Go through the spaces looking for one that has objects.
4863 while (space_iterator_->has_next()) {
4864 object_iterator_ = space_iterator_->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00004865 if (HeapObject* obj = object_iterator_->next_object()) {
4866 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00004867 }
4868 }
4869 }
4870 // Done with the last space.
4871 object_iterator_ = NULL;
Leon Clarked91b9f72010-01-27 17:25:45 +00004872 return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00004873}
4874
4875
4876void HeapIterator::reset() {
4877 // Restart the iterator.
4878 Shutdown();
4879 Init();
4880}
4881
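// Usage sketch (mirrors the loop in RecordStats() above; illustrative only):
//
//   HeapIterator iterator;
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     if (obj->IsCode()) { /* inspect the object */ }
//   }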
4882
4883#ifdef DEBUG
4884
4885static bool search_for_any_global;
4886static Object* search_target;
4887static bool found_target;
4888static List<Object*> object_stack(20);
4889
4890
4891// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
4892static const int kMarkTag = 2;
4893
4894static void MarkObjectRecursively(Object** p);
4895class MarkObjectVisitor : public ObjectVisitor {
4896 public:
4897 void VisitPointers(Object** start, Object** end) {
4898    // Mark all HeapObject pointers in [start, end).
4899 for (Object** p = start; p < end; p++) {
4900 if ((*p)->IsHeapObject())
4901 MarkObjectRecursively(p);
4902 }
4903 }
4904};
4905
4906static MarkObjectVisitor mark_visitor;
4907
4908static void MarkObjectRecursively(Object** p) {
4909 if (!(*p)->IsHeapObject()) return;
4910
4911 HeapObject* obj = HeapObject::cast(*p);
4912
4913 Object* map = obj->map();
4914
4915 if (!map->IsHeapObject()) return; // visited before
4916
4917 if (found_target) return; // stop if target found
4918 object_stack.Add(obj);
4919 if ((search_for_any_global && obj->IsJSGlobalObject()) ||
4920 (!search_for_any_global && (obj == search_target))) {
4921 found_target = true;
4922 return;
4923 }
4924
4925 // not visited yet
4926 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
4927
4928 Address map_addr = map_p->address();
4929
4930 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
4931
4932 MarkObjectRecursively(&map);
4933
4934 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
4935 &mark_visitor);
4936
4937 if (!found_target) // don't pop if found the target
4938 object_stack.RemoveLast();
4939}
4940
4941
4942static void UnmarkObjectRecursively(Object** p);
4943class UnmarkObjectVisitor : public ObjectVisitor {
4944 public:
4945 void VisitPointers(Object** start, Object** end) {
4946    // Unmark all HeapObject pointers in [start, end).
4947 for (Object** p = start; p < end; p++) {
4948 if ((*p)->IsHeapObject())
4949 UnmarkObjectRecursively(p);
4950 }
4951 }
4952};
4953
4954static UnmarkObjectVisitor unmark_visitor;
4955
4956static void UnmarkObjectRecursively(Object** p) {
4957 if (!(*p)->IsHeapObject()) return;
4958
4959 HeapObject* obj = HeapObject::cast(*p);
4960
4961 Object* map = obj->map();
4962
4963 if (map->IsHeapObject()) return; // unmarked already
4964
4965 Address map_addr = reinterpret_cast<Address>(map);
4966
4967 map_addr -= kMarkTag;
4968
4969 ASSERT_TAG_ALIGNED(map_addr);
4970
4971 HeapObject* map_p = HeapObject::FromAddress(map_addr);
4972
4973 obj->set_map(reinterpret_cast<Map*>(map_p));
4974
4975 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
4976
4977 obj->IterateBody(Map::cast(map_p)->instance_type(),
4978 obj->SizeFromMap(Map::cast(map_p)),
4979 &unmark_visitor);
4980}
4981
4982
4983static void MarkRootObjectRecursively(Object** root) {
4984 if (search_for_any_global) {
4985 ASSERT(search_target == NULL);
4986 } else {
4987 ASSERT(search_target->IsHeapObject());
4988 }
4989 found_target = false;
4990 object_stack.Clear();
4991
4992 MarkObjectRecursively(root);
4993 UnmarkObjectRecursively(root);
4994
4995 if (found_target) {
4996 PrintF("=====================================\n");
4997 PrintF("==== Path to object ====\n");
4998 PrintF("=====================================\n\n");
4999
5000 ASSERT(!object_stack.is_empty());
5001 for (int i = 0; i < object_stack.length(); i++) {
5002 if (i > 0) PrintF("\n |\n |\n V\n\n");
5003 Object* obj = object_stack[i];
5004 obj->Print();
5005 }
5006 PrintF("=====================================\n");
5007 }
5008}
5009
5010
5011// Helper class for visiting HeapObjects recursively.
5012class MarkRootVisitor: public ObjectVisitor {
5013 public:
5014 void VisitPointers(Object** start, Object** end) {
5015 // Visit all HeapObject pointers in [start, end)
5016 for (Object** p = start; p < end; p++) {
5017 if ((*p)->IsHeapObject())
5018 MarkRootObjectRecursively(p);
5019 }
5020 }
5021};
5022
5023
5024// Triggers a depth-first traversal of reachable objects from roots
5025// and finds a path to a specific heap object and prints it.
Leon Clarkee46be812010-01-19 14:06:41 +00005026void Heap::TracePathToObject(Object* target) {
5027 search_target = target;
Steve Blocka7e24c12009-10-30 11:49:00 +00005028 search_for_any_global = false;
5029
5030 MarkRootVisitor root_visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00005031 IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005032}
5033
5034
5035// Triggers a depth-first traversal of reachable objects from roots
5036// and finds a path to any global object and prints it. Useful for
5037// determining the source for leaks of global objects.
5038void Heap::TracePathToGlobal() {
5039 search_target = NULL;
5040 search_for_any_global = true;
5041
5042 MarkRootVisitor root_visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00005043 IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005044}
5045#endif
5046
5047
Ben Murdochf87a2032010-10-22 12:50:53 +01005048static intptr_t CountTotalHolesSize() {
5049 intptr_t holes_size = 0;
Leon Clarkef7060e22010-06-03 12:02:55 +01005050 OldSpaces spaces;
5051 for (OldSpace* space = spaces.next();
5052 space != NULL;
5053 space = spaces.next()) {
5054 holes_size += space->Waste() + space->AvailableFree();
5055 }
5056 return holes_size;
5057}
5058
5059
Steve Blocka7e24c12009-10-30 11:49:00 +00005060GCTracer::GCTracer()
5061 : start_time_(0.0),
Leon Clarkef7060e22010-06-03 12:02:55 +01005062 start_size_(0),
Steve Blocka7e24c12009-10-30 11:49:00 +00005063 gc_count_(0),
5064 full_gc_count_(0),
5065 is_compacting_(false),
Leon Clarkef7060e22010-06-03 12:02:55 +01005066 marked_count_(0),
5067 allocated_since_last_gc_(0),
5068 spent_in_mutator_(0),
5069 promoted_objects_size_(0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005070 // These two fields reflect the state of the previous full collection.
5071 // Set them before they are changed by the collector.
5072 previous_has_compacted_ = MarkCompactCollector::HasCompacted();
5073 previous_marked_count_ = MarkCompactCollector::previous_marked_count();
Leon Clarkef7060e22010-06-03 12:02:55 +01005074 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00005075 start_time_ = OS::TimeCurrentMillis();
Leon Clarkef7060e22010-06-03 12:02:55 +01005076 start_size_ = Heap::SizeOfObjects();
5077
5078 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
5079 scopes_[i] = 0;
5080 }
5081
5082 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
5083
5084 allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;
5085
5086 if (last_gc_end_timestamp_ > 0) {
5087 spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
5088 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005089}
5090
5091
5092GCTracer::~GCTracer() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005093 // Printf ONE line iff flag is set.
Leon Clarkef7060e22010-06-03 12:02:55 +01005094 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
5095
5096 bool first_gc = (last_gc_end_timestamp_ == 0);
5097
5098 alive_after_last_gc_ = Heap::SizeOfObjects();
5099 last_gc_end_timestamp_ = OS::TimeCurrentMillis();
5100
5101 int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);
5102
5103 // Update cumulative GC statistics if required.
5104 if (FLAG_print_cumulative_gc_stat) {
5105 max_gc_pause_ = Max(max_gc_pause_, time);
5106 max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
5107 if (!first_gc) {
5108 min_in_mutator_ = Min(min_in_mutator_,
5109 static_cast<int>(spent_in_mutator_));
5110 }
5111 }
5112
5113 if (!FLAG_trace_gc_nvp) {
5114 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
5115
5116 PrintF("%s %.1f -> %.1f MB, ",
5117 CollectorString(),
5118 static_cast<double>(start_size_) / MB,
5119 SizeOfHeapObjects());
5120
5121 if (external_time > 0) PrintF("%d / ", external_time);
5122 PrintF("%d ms.\n", time);
5123 } else {
5124 PrintF("pause=%d ", time);
5125 PrintF("mutator=%d ",
5126 static_cast<int>(spent_in_mutator_));
5127
5128 PrintF("gc=");
5129 switch (collector_) {
5130 case SCAVENGER:
5131 PrintF("s");
5132 break;
5133 case MARK_COMPACTOR:
5134 PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
5135 break;
5136 default:
5137 UNREACHABLE();
5138 }
5139 PrintF(" ");
5140
5141 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
5142 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
5143 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
Iain Merrick75681382010-08-19 15:07:18 +01005144 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
Leon Clarkef7060e22010-06-03 12:02:55 +01005145 PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
5146
Ben Murdochf87a2032010-10-22 12:50:53 +01005147 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
5148 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", Heap::SizeOfObjects());
5149 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
5150 in_free_list_or_wasted_before_gc_);
5151 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
Leon Clarkef7060e22010-06-03 12:02:55 +01005152
Ben Murdochf87a2032010-10-22 12:50:53 +01005153 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
5154 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005155
5156 PrintF("\n");
5157 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005158
5159#if defined(ENABLE_LOGGING_AND_PROFILING)
5160 Heap::PrintShortHeapStatistics();
5161#endif
5162}
5163
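// Shape of a --trace-gc-nvp line emitted above (field names taken directly
// from the PrintF calls; the numbers here are placeholders, not real
// measurements):
//
//   pause=12 mutator=340 gc=ms external=0 mark=7 sweep=3 sweepns=1 compact=0
//   total_size_before=8388608 total_size_after=5242880 holes_size_before=0
//   holes_size_after=0 allocated=1048576 promoted=262144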
5164
5165const char* GCTracer::CollectorString() {
5166 switch (collector_) {
5167 case SCAVENGER:
5168 return "Scavenge";
5169 case MARK_COMPACTOR:
5170 return MarkCompactCollector::HasCompacted() ? "Mark-compact"
5171 : "Mark-sweep";
5172 }
5173 return "Unknown GC";
5174}
5175
5176
5177int KeyedLookupCache::Hash(Map* map, String* name) {
5178 // Uses only lower 32 bits if pointers are larger.
5179 uintptr_t addr_hash =
Leon Clarkee46be812010-01-19 14:06:41 +00005180 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
Andrei Popescu402d9372010-02-26 13:31:12 +00005181 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
Steve Blocka7e24c12009-10-30 11:49:00 +00005182}
5183
5184
5185int KeyedLookupCache::Lookup(Map* map, String* name) {
5186 int index = Hash(map, name);
5187 Key& key = keys_[index];
5188 if ((key.map == map) && key.name->Equals(name)) {
5189 return field_offsets_[index];
5190 }
5191 return -1;
5192}
5193
5194
5195void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
5196 String* symbol;
5197 if (Heap::LookupSymbolIfExists(name, &symbol)) {
5198 int index = Hash(map, symbol);
5199 Key& key = keys_[index];
5200 key.map = map;
5201 key.name = symbol;
5202 field_offsets_[index] = field_offset;
5203 }
5204}
5205
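// Note added for exposition: Update() above only caches names that are
// already interned as symbols, so the table never pins an arbitrary string;
// Lookup() still compares the name with Equals() because the probe string
// need not itself be a symbol.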
5206
5207void KeyedLookupCache::Clear() {
5208 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
5209}
5210
5211
5212KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];
5213
5214
5215int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];
5216
5217
5218void DescriptorLookupCache::Clear() {
5219 for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
5220}
5221
5222
5223DescriptorLookupCache::Key
5224DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];
5225
5226int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];
5227
5228
5229#ifdef DEBUG
Ben Murdochf87a2032010-10-22 12:50:53 +01005230void Heap::GarbageCollectionGreedyCheck() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005231 ASSERT(FLAG_gc_greedy);
Ben Murdochf87a2032010-10-22 12:50:53 +01005232 if (Bootstrapper::IsActive()) return;
5233 if (disallow_allocation_failure()) return;
5234 CollectGarbage(NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005235}
5236#endif
5237
5238
5239TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
5240 : type_(t) {
5241 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
5242 uint32_t in1 = 0xffffffffu; // generated by the FPU.
5243 for (int i = 0; i < kCacheSize; i++) {
5244 elements_[i].in[0] = in0;
5245 elements_[i].in[1] = in1;
5246 elements_[i].output = NULL;
5247 }
5248}
5249
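// Exposition (added): 0xffffffff in both input words marks an empty slot
// because, viewed as the halves of a double, it is a NaN bit pattern the FPU
// never generates, so no genuine probe key can collide with an uninitialized
// entry.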
5250
5251TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];
5252
5253
5254void TranscendentalCache::Clear() {
5255 for (int i = 0; i < kNumberOfCaches; i++) {
5256 if (caches_[i] != NULL) {
5257 delete caches_[i];
5258 caches_[i] = NULL;
5259 }
5260 }
5261}
5262
5263
Leon Clarkee46be812010-01-19 14:06:41 +00005264void ExternalStringTable::CleanUp() {
5265 int last = 0;
5266 for (int i = 0; i < new_space_strings_.length(); ++i) {
5267 if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
5268 if (Heap::InNewSpace(new_space_strings_[i])) {
5269 new_space_strings_[last++] = new_space_strings_[i];
5270 } else {
5271 old_space_strings_.Add(new_space_strings_[i]);
5272 }
5273 }
5274 new_space_strings_.Rewind(last);
5275 last = 0;
5276 for (int i = 0; i < old_space_strings_.length(); ++i) {
5277 if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
5278 ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
5279 old_space_strings_[last++] = old_space_strings_[i];
5280 }
5281 old_space_strings_.Rewind(last);
5282 Verify();
5283}
5284
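// Note added for exposition: CleanUp() above treats raw_unchecked_null_value()
// entries as tombstones left for finalized strings, compacts both lists in
// place with Rewind(), and moves survivors that are no longer in new space
// over to old_space_strings_.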
5285
5286void ExternalStringTable::TearDown() {
5287 new_space_strings_.Free();
5288 old_space_strings_.Free();
5289}
5290
5291
5292List<Object*> ExternalStringTable::new_space_strings_;
5293List<Object*> ExternalStringTable::old_space_strings_;
5294
Steve Blocka7e24c12009-10-30 11:49:00 +00005295} } // namespace v8::internal