// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "natives.h"
#include "platform.h"
#include "runtime.h"
#include "serialize.h"
#include "stub-cache.h"
#include "v8threads.h"

namespace v8 {
namespace internal {

// 32-bit encoding: a RelativeAddress must be able to fit in a
// pointer: it is encoded as an Address with (from LS to MS bits):
// - 2 bits identifying this as a HeapObject.
// - 4 bits to encode the AllocationSpace (including special values for
//   code and fixed arrays in LO space)
// - 26 bits identifying a word in the space, in one of three formats:
//   - paged spaces: 15 bits of page number, 11 bits of word offset in page
//   - NEW space: 26 bits of word offset
//   - LO space: 26 bits of page number

const int kSpaceShift = kHeapObjectTagSize;
const int kSpaceBits = 4;
const int kSpaceMask = (1 << kSpaceBits) - 1;

const int kOffsetShift = kSpaceShift + kSpaceBits;
const int kOffsetBits = 11;
const int kOffsetMask = (1 << kOffsetBits) - 1;

const int kPageShift = kOffsetShift + kOffsetBits;
const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
const int kPageMask = (1 << kPageBits) - 1;

const int kPageAndOffsetShift = kOffsetShift;
const int kPageAndOffsetBits = kPageBits + kOffsetBits;
const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;
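
// A worked example (illustration only, not referenced by the code below):
// with the layout above, word offset 64 on page 3 of a paged space 'space'
// composes as
//
//   Address a = reinterpret_cast<Address>(
//       (3 << kPageShift) | (64 << kOffsetShift) |
//       (space << kSpaceShift) | kHeapObjectTag);
//
// RelativeAddress::Encode() below assembles exactly these fields, and
// GetSpace(), PageIndex() and PageOffset() take them apart again.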

// These values are special allocation space tags used for
// serialization.
// Mark the pages executable on platforms that support it.
const int kLargeCode = LAST_SPACE + 1;
// Allocate extra remembered-set bits.
const int kLargeFixedArray = LAST_SPACE + 2;


static inline AllocationSpace GetSpace(Address addr) {
  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
  int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
  if (space_number > LAST_SPACE) space_number = LO_SPACE;
  return static_cast<AllocationSpace>(space_number);
}


static inline bool IsLargeExecutableObject(Address addr) {
  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
  const int space_number =
      (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
  return (space_number == kLargeCode);
}


static inline bool IsLargeFixedArray(Address addr) {
  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
  const int space_number =
      (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
  return (space_number == kLargeFixedArray);
}


static inline int PageIndex(Address addr) {
  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
  return static_cast<int>(encoded >> kPageShift) & kPageMask;
}


static inline int PageOffset(Address addr) {
  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
  const int offset = static_cast<int>(encoded >> kOffsetShift) & kOffsetMask;
  return offset << kObjectAlignmentBits;
}


static inline int NewSpaceOffset(Address addr) {
  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
  const int page_offset =
      static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
  return page_offset << kObjectAlignmentBits;
}


static inline int LargeObjectIndex(Address addr) {
  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
  return static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
}


// A RelativeAddress encodes a heap address that is independent of
// the actual memory addresses in the real heap. The general case (for the
// OLD, CODE and MAP spaces) is a (space id, page number, page offset)
// triple. The NEW space has page number == 0, because there are no
// pages. The LARGE_OBJECT space has page offset == 0, since there is
// exactly one object per page. RelativeAddresses are encodable as
// Addresses, so that they can replace the map() pointers of
// HeapObjects. The encoded Addresses are also tagged as HeapObjects
// and allow for marking (see is_marked(), mark(), clear_mark()) as
// used by the Mark-Compact collector.

class RelativeAddress {
 public:
  RelativeAddress(AllocationSpace space,
                  int page_index,
                  int page_offset)
      : space_(space), page_index_(page_index), page_offset_(page_offset) {
    // Assert that the space encoding (plus the two pseudo-spaces for
    // special large objects) fits in the available bits.
    ASSERT(((LAST_SPACE + 2) & ~kSpaceMask) == 0);
    ASSERT(space <= LAST_SPACE && space >= 0);
  }

  // Return the encoding of 'this' as an Address. Decode with constructor.
  Address Encode() const;

  AllocationSpace space() const {
    if (space_ > LAST_SPACE) return LO_SPACE;
    return static_cast<AllocationSpace>(space_);
  }
  int page_index() const { return page_index_; }
  int page_offset() const { return page_offset_; }

  bool in_paged_space() const {
    return space_ == CODE_SPACE ||
           space_ == OLD_POINTER_SPACE ||
           space_ == OLD_DATA_SPACE ||
           space_ == MAP_SPACE ||
           space_ == CELL_SPACE;
  }

  void next_address(int offset) { page_offset_ += offset; }
  void next_page(int init_offset = 0) {
    page_index_++;
    page_offset_ = init_offset;
  }

#ifdef DEBUG
  void Verify();
#endif

  void set_to_large_code_object() {
    ASSERT(space_ == LO_SPACE);
    space_ = kLargeCode;
  }
  void set_to_large_fixed_array() {
    ASSERT(space_ == LO_SPACE);
    space_ = kLargeFixedArray;
  }


 private:
  int space_;
  int page_index_;
  int page_offset_;
};


Address RelativeAddress::Encode() const {
  ASSERT(page_index_ >= 0);
  int word_offset = 0;
  int result = 0;
  switch (space_) {
    case MAP_SPACE:
    case CELL_SPACE:
    case OLD_POINTER_SPACE:
    case OLD_DATA_SPACE:
    case CODE_SPACE:
      ASSERT_EQ(0, page_index_ & ~kPageMask);
      word_offset = page_offset_ >> kObjectAlignmentBits;
      ASSERT_EQ(0, word_offset & ~kOffsetMask);
      result = (page_index_ << kPageShift) | (word_offset << kOffsetShift);
      break;
    case NEW_SPACE:
      ASSERT_EQ(0, page_index_);
      word_offset = page_offset_ >> kObjectAlignmentBits;
      ASSERT_EQ(0, word_offset & ~kPageAndOffsetMask);
      result = word_offset << kPageAndOffsetShift;
      break;
    case LO_SPACE:
    case kLargeCode:
    case kLargeFixedArray:
      ASSERT_EQ(0, page_offset_);
      ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
      result = page_index_ << kPageAndOffsetShift;
      break;
  }
  // OR in AllocationSpace and kHeapObjectTag.
  ASSERT_EQ(0, space_ & ~kSpaceMask);
  result |= (space_ << kSpaceShift) | kHeapObjectTag;
  return reinterpret_cast<Address>(result);
}
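
// A round-trip sketch (illustration only): for a paged space the decoding
// helpers above invert Encode(), so something like
//
//   RelativeAddress ra(CODE_SPACE, 3, Page::kObjectStartOffset);
//   Address a = ra.Encode();
//   ASSERT(GetSpace(a) == CODE_SPACE);
//   ASSERT(PageIndex(a) == 3);
//   ASSERT(PageOffset(a) == Page::kObjectStartOffset);
//
// should hold, assuming the offset is object-aligned so no low bits are
// lost to the kObjectAlignmentBits shift.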


#ifdef DEBUG
void RelativeAddress::Verify() {
  ASSERT(page_offset_ >= 0 && page_index_ >= 0);
  switch (space_) {
    case MAP_SPACE:
    case CELL_SPACE:
    case OLD_POINTER_SPACE:
    case OLD_DATA_SPACE:
    case CODE_SPACE:
      ASSERT(Page::kObjectStartOffset <= page_offset_ &&
             page_offset_ <= Page::kPageSize);
      break;
    case NEW_SPACE:
      ASSERT(page_index_ == 0);
      break;
    case LO_SPACE:
    case kLargeCode:
    case kLargeFixedArray:
      ASSERT(page_offset_ == 0);
      break;
  }
}
#endif

enum GCTreatment {
  DataObject,     // Object that cannot contain a reference to new space.
  PointerObject,  // Object that can contain a reference to new space.
  CodeObject      // Object that contains executable code.
};

// A SimulatedHeapSpace simulates the allocation of objects in a page in
// the heap. It uses linear allocation - that is, it doesn't simulate the
// use of a free list. This simulated allocation must exactly match that
// done by Heap.

class SimulatedHeapSpace {
 public:
  // The default constructor initializes to an invalid state.
  SimulatedHeapSpace(): current_(LAST_SPACE, -1, -1) {}

  // Sets 'this' to the first address in 'space' that would be
  // returned by allocation in an empty heap.
  void InitEmptyHeap(AllocationSpace space);

  // Sets 'this' to the next address in 'space' that would be returned
  // by allocation in the current heap. Intended only for testing
  // serialization and deserialization in the current address space.
  void InitCurrentHeap(AllocationSpace space);

  // Returns the RelativeAddress where the next
  // object of 'size' bytes will be allocated, and updates 'this' to
  // point to the next free address beyond that object.
  RelativeAddress Allocate(int size, GCTreatment special_gc_treatment);

 private:
  RelativeAddress current_;
};
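
// Typical use, as a sketch (the Serializer below drives this for real):
//
//   SimulatedHeapSpace sim;
//   sim.InitEmptyHeap(OLD_DATA_SPACE);
//   RelativeAddress first = sim.Allocate(size1, DataObject);
//   RelativeAddress second = sim.Allocate(size2, DataObject);
//
// 'second' follows 'first' by OBJECT_SIZE_ALIGN(size1) bytes, assuming no
// page spill; a paged-space allocation that would cross Page::kPageSize
// advances to the start of the next page instead.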


void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
  switch (space) {
    case MAP_SPACE:
    case CELL_SPACE:
    case OLD_POINTER_SPACE:
    case OLD_DATA_SPACE:
    case CODE_SPACE:
      current_ = RelativeAddress(space, 0, Page::kObjectStartOffset);
      break;
    case NEW_SPACE:
    case LO_SPACE:
      current_ = RelativeAddress(space, 0, 0);
      break;
  }
}


void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
  switch (space) {
    case MAP_SPACE:
    case CELL_SPACE:
    case OLD_POINTER_SPACE:
    case OLD_DATA_SPACE:
    case CODE_SPACE: {
      PagedSpace* ps;
      if (space == MAP_SPACE) {
        ps = Heap::map_space();
      } else if (space == CELL_SPACE) {
        ps = Heap::cell_space();
      } else if (space == OLD_POINTER_SPACE) {
        ps = Heap::old_pointer_space();
      } else if (space == OLD_DATA_SPACE) {
        ps = Heap::old_data_space();
      } else {
        ASSERT(space == CODE_SPACE);
        ps = Heap::code_space();
      }
      Address top = ps->top();
      Page* top_page = Page::FromAllocationTop(top);
      int page_index = 0;
      PageIterator it(ps, PageIterator::PAGES_IN_USE);
      while (it.has_next()) {
        if (it.next() == top_page) break;
        page_index++;
      }
      current_ = RelativeAddress(space,
                                 page_index,
                                 top_page->Offset(top));
      break;
    }
    case NEW_SPACE:
      current_ = RelativeAddress(space,
                                 0,
                                 Heap::NewSpaceTop() - Heap::NewSpaceStart());
      break;
    case LO_SPACE:
      int page_index = 0;
      for (LargeObjectIterator it(Heap::lo_space()); it.has_next(); it.next()) {
        page_index++;
      }
      current_ = RelativeAddress(space, page_index, 0);
      break;
  }
}


RelativeAddress SimulatedHeapSpace::Allocate(int size,
                                             GCTreatment special_gc_treatment) {
#ifdef DEBUG
  current_.Verify();
#endif
  int alloc_size = OBJECT_SIZE_ALIGN(size);
  if (current_.in_paged_space() &&
      current_.page_offset() + alloc_size > Page::kPageSize) {
    ASSERT(alloc_size <= Page::kMaxHeapObjectSize);
    current_.next_page(Page::kObjectStartOffset);
  }
  RelativeAddress result = current_;
  if (current_.space() == LO_SPACE) {
    current_.next_page();
    if (special_gc_treatment == CodeObject) {
      result.set_to_large_code_object();
    } else if (special_gc_treatment == PointerObject) {
      result.set_to_large_fixed_array();
    }
  } else {
    current_.next_address(alloc_size);
  }
#ifdef DEBUG
  current_.Verify();
  result.Verify();
#endif
  return result;
}

// -----------------------------------------------------------------------------
// Coding of external references.

// The encoding of an external reference. The type is in the high word.
// The id is in the low word.
static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
  return static_cast<uint32_t>(type) << 16 | id;
}
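
// For example (illustration only), EncodeExternal(UNCLASSIFIED, 4) packs
// the type code into bits 16..31 and the id into bits 0..15:
//
//   uint32_t code = EncodeExternal(UNCLASSIFIED, 4);
//   TypeCode type = static_cast<TypeCode>(code >> 16);    // == UNCLASSIFIED
//   uint16_t id = static_cast<uint16_t>(code & 0xFFFF);   // == 4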


static int* GetInternalPointer(StatsCounter* counter) {
  // All counters refer to dummy_counter if deserialization happens without
  // setting up counters.
  static int dummy_counter = 0;
  return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
}


// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
class ExternalReferenceTable {
 public:
  static ExternalReferenceTable* instance() {
    if (!instance_) instance_ = new ExternalReferenceTable();
    return instance_;
  }

  int size() const { return refs_.length(); }

  Address address(int i) { return refs_[i].address; }

  uint32_t code(int i) { return refs_[i].code; }

  const char* name(int i) { return refs_[i].name; }

  int max_id(int code) { return max_id_[code]; }

 private:
  static ExternalReferenceTable* instance_;

  ExternalReferenceTable() : refs_(64) { PopulateTable(); }
  ~ExternalReferenceTable() { }

  struct ExternalReferenceEntry {
    Address address;
    uint32_t code;
    const char* name;
  };

  void PopulateTable();

  // For a few types of references, we can get their address from their id.
  void AddFromId(TypeCode type, uint16_t id, const char* name);

  // For other types of references, the caller will figure out the address.
  void Add(Address address, TypeCode type, uint16_t id, const char* name);

  List<ExternalReferenceEntry> refs_;
  int max_id_[kTypeCodeCount];
};


ExternalReferenceTable* ExternalReferenceTable::instance_ = NULL;


void ExternalReferenceTable::AddFromId(TypeCode type,
                                       uint16_t id,
                                       const char* name) {
  Address address;
  switch (type) {
    case C_BUILTIN: {
      ExternalReference ref(static_cast<Builtins::CFunctionId>(id));
      address = ref.address();
      break;
    }
    case BUILTIN: {
      ExternalReference ref(static_cast<Builtins::Name>(id));
      address = ref.address();
      break;
    }
    case RUNTIME_FUNCTION: {
      ExternalReference ref(static_cast<Runtime::FunctionId>(id));
      address = ref.address();
      break;
    }
    case IC_UTILITY: {
      ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)));
      address = ref.address();
      break;
    }
    default:
      UNREACHABLE();
      return;
  }
  Add(address, type, id, name);
}


void ExternalReferenceTable::Add(Address address,
                                 TypeCode type,
                                 uint16_t id,
                                 const char* name) {
  CHECK_NE(NULL, address);
  ExternalReferenceEntry entry;
  entry.address = address;
  entry.code = EncodeExternal(type, id);
  entry.name = name;
  CHECK_NE(0, entry.code);
  refs_.Add(entry);
  if (id > max_id_[type]) max_id_[type] = id;
}


void ExternalReferenceTable::PopulateTable() {
  for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
    max_id_[type_code] = 0;
  }

  // The following populates all of the different types of external references
  // into the ExternalReferenceTable.
  //
  // NOTE: This function was originally 100k of code. It has since been
  // rewritten to be mostly table driven, as the callback macro style tends to
  // very easily cause code bloat. Please be careful in the future when adding
  // new references.

  struct RefTableEntry {
    TypeCode type;
    uint16_t id;
    const char* name;
  };

  static const RefTableEntry ref_table[] = {
  // Builtins
#define DEF_ENTRY_C(name) \
  { C_BUILTIN, \
    Builtins::c_##name, \
    "Builtins::" #name },

  BUILTIN_LIST_C(DEF_ENTRY_C)
#undef DEF_ENTRY_C

#define DEF_ENTRY_C(name) \
  { BUILTIN, \
    Builtins::name, \
    "Builtins::" #name },
#define DEF_ENTRY_A(name, kind, state) DEF_ENTRY_C(name)

  BUILTIN_LIST_C(DEF_ENTRY_C)
  BUILTIN_LIST_A(DEF_ENTRY_A)
  BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
#undef DEF_ENTRY_C
#undef DEF_ENTRY_A

  // Runtime functions
#define RUNTIME_ENTRY(name, nargs, ressize) \
  { RUNTIME_FUNCTION, \
    Runtime::k##name, \
    "Runtime::" #name },

  RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY

  // IC utilities
#define IC_ENTRY(name) \
  { IC_UTILITY, \
    IC::k##name, \
    "IC::" #name },

  IC_UTIL_LIST(IC_ENTRY)
#undef IC_ENTRY
  };  // end of ref_table[].

  for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
    AddFromId(ref_table[i].type, ref_table[i].id, ref_table[i].name);
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Debug addresses
  Add(Debug_Address(Debug::k_after_break_target_address).address(),
      DEBUG_ADDRESS,
      Debug::k_after_break_target_address << kDebugIdShift,
      "Debug::after_break_target_address()");
  Add(Debug_Address(Debug::k_debug_break_return_address).address(),
      DEBUG_ADDRESS,
      Debug::k_debug_break_return_address << kDebugIdShift,
      "Debug::debug_break_return_address()");
  const char* debug_register_format = "Debug::register_address(%i)";
  size_t dr_format_length = strlen(debug_register_format);
  for (int i = 0; i < kNumJSCallerSaved; ++i) {
    Vector<char> name = Vector<char>::New(dr_format_length + 1);
    OS::SNPrintF(name, debug_register_format, i);
    Add(Debug_Address(Debug::k_register_address, i).address(),
        DEBUG_ADDRESS,
        Debug::k_register_address << kDebugIdShift | i,
        name.start());
  }
#endif

  // Stat counters
  struct StatsRefTableEntry {
    StatsCounter* counter;
    uint16_t id;
    const char* name;
  };

  static const StatsRefTableEntry stats_ref_table[] = {
#define COUNTER_ENTRY(name, caption) \
  { &Counters::name, \
    Counters::k_##name, \
    "Counters::" #name },

  STATS_COUNTER_LIST_1(COUNTER_ENTRY)
  STATS_COUNTER_LIST_2(COUNTER_ENTRY)
#undef COUNTER_ENTRY
  };  // end of stats_ref_table[].

  for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
    Add(reinterpret_cast<Address>(
            GetInternalPointer(stats_ref_table[i].counter)),
        STATS_COUNTER,
        stats_ref_table[i].id,
        stats_ref_table[i].name);
  }

  // Top addresses
  const char* top_address_format = "Top::get_address_from_id(%i)";
  size_t top_format_length = strlen(top_address_format);
  for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
    Vector<char> name = Vector<char>::New(top_format_length + 1);
    const char* chars = name.start();
    OS::SNPrintF(name, top_address_format, i);
    Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
  }

  // Extensions
  Add(FUNCTION_ADDR(GCExtension::GC), EXTENSION, 1,
      "GCExtension::GC");

  // Accessors
#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
  Add((Address)&Accessors::name, \
      ACCESSOR, \
      Accessors::k##name, \
      "Accessors::" #name);

  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
#undef ACCESSOR_DESCRIPTOR_DECLARATION

  // Stub cache tables
  Add(SCTableReference::keyReference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      1,
      "StubCache::primary_->key");
  Add(SCTableReference::valueReference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      2,
      "StubCache::primary_->value");
  Add(SCTableReference::keyReference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      3,
      "StubCache::secondary_->key");
  Add(SCTableReference::valueReference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      4,
      "StubCache::secondary_->value");

  // Runtime entries
  Add(ExternalReference::perform_gc_function().address(),
      RUNTIME_ENTRY,
      1,
      "Runtime::PerformGC");
  Add(ExternalReference::random_positive_smi_function().address(),
      RUNTIME_ENTRY,
      2,
      "V8::RandomPositiveSmi");

  // Miscellaneous
  Add(ExternalReference::builtin_passed_function().address(),
      UNCLASSIFIED,
      1,
      "Builtins::builtin_passed_function");
  Add(ExternalReference::the_hole_value_location().address(),
      UNCLASSIFIED,
      2,
      "Factory::the_hole_value().location()");
  Add(ExternalReference::roots_address().address(),
      UNCLASSIFIED,
      3,
      "Heap::roots_address()");
  Add(ExternalReference::address_of_stack_guard_limit().address(),
      UNCLASSIFIED,
      4,
      "StackGuard::address_of_jslimit()");
  Add(ExternalReference::address_of_regexp_stack_limit().address(),
      UNCLASSIFIED,
      5,
      "RegExpStack::limit_address()");
  Add(ExternalReference::new_space_start().address(),
      UNCLASSIFIED,
      6,
      "Heap::NewSpaceStart()");
  Add(ExternalReference::heap_always_allocate_scope_depth().address(),
      UNCLASSIFIED,
      7,
      "Heap::always_allocate_scope_depth()");
  Add(ExternalReference::new_space_allocation_limit_address().address(),
      UNCLASSIFIED,
      8,
      "Heap::NewSpaceAllocationLimitAddress()");
  Add(ExternalReference::new_space_allocation_top_address().address(),
      UNCLASSIFIED,
      9,
      "Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
  Add(ExternalReference::debug_break().address(),
      UNCLASSIFIED,
      10,
      "Debug::Break()");
  Add(ExternalReference::debug_step_in_fp_address().address(),
      UNCLASSIFIED,
      11,
      "Debug::step_in_fp_addr()");
#endif
  Add(ExternalReference::double_fp_operation(Token::ADD).address(),
      UNCLASSIFIED,
      12,
      "add_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::SUB).address(),
      UNCLASSIFIED,
      13,
      "sub_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::MUL).address(),
      UNCLASSIFIED,
      14,
      "mul_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::DIV).address(),
      UNCLASSIFIED,
      15,
      "div_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::MOD).address(),
      UNCLASSIFIED,
      16,
      "mod_two_doubles");
  Add(ExternalReference::compare_doubles().address(),
      UNCLASSIFIED,
      17,
      "compare_doubles");
#ifdef V8_NATIVE_REGEXP
  Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
      UNCLASSIFIED,
      18,
      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
  Add(ExternalReference::re_check_stack_guard_state().address(),
      UNCLASSIFIED,
      19,
      "RegExpMacroAssembler*::CheckStackGuardState()");
  Add(ExternalReference::re_grow_stack().address(),
      UNCLASSIFIED,
      20,
      "NativeRegExpMacroAssembler::GrowStack()");
#endif
}


ExternalReferenceEncoder::ExternalReferenceEncoder()
    : encodings_(Match) {
  ExternalReferenceTable* external_references =
      ExternalReferenceTable::instance();
  for (int i = 0; i < external_references->size(); ++i) {
    Put(external_references->address(i), i);
  }
}


uint32_t ExternalReferenceEncoder::Encode(Address key) const {
  int index = IndexOf(key);
  return index >= 0 ? ExternalReferenceTable::instance()->code(index) : 0;
}


const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
  int index = IndexOf(key);
  return index >= 0 ? ExternalReferenceTable::instance()->name(index) : NULL;
}


int ExternalReferenceEncoder::IndexOf(Address key) const {
  if (key == NULL) return -1;
  HashMap::Entry* entry =
      const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false);
  return entry == NULL
      ? -1
      : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
}


void ExternalReferenceEncoder::Put(Address key, int index) {
  HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
  entry->value = reinterpret_cast<void*>(index);
}


ExternalReferenceDecoder::ExternalReferenceDecoder()
    : encodings_(NewArray<Address*>(kTypeCodeCount)) {
  ExternalReferenceTable* external_references =
      ExternalReferenceTable::instance();
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    int max = external_references->max_id(type) + 1;
    encodings_[type] = NewArray<Address>(max + 1);
  }
  for (int i = 0; i < external_references->size(); ++i) {
    Put(external_references->code(i), external_references->address(i));
  }
}
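
// A decode sketch (illustration only): Decode() presumably splits a code
// produced by EncodeExternal back into (type, id) and indexes encodings_,
// along the lines of
//
//   Address ExternalReferenceDecoder::Decode(uint32_t code) {
//     int type = code >> 16;      // high word: TypeCode
//     int id = code & 0xFFFF;     // low word: id within that type
//     return encodings_[type][id];
//   }
//
// (The real Decode() lives in the header; this is just the shape implied
// by the tables built above.)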


ExternalReferenceDecoder::~ExternalReferenceDecoder() {
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    DeleteArray(encodings_[type]);
  }
  DeleteArray(encodings_);
}


//------------------------------------------------------------------------------
// Implementation of Serializer


// Helper class to write the bytes of the serialized heap.

class SnapshotWriter {
 public:
  SnapshotWriter() {
    len_ = 0;
    max_ = 8 << 10;  // 8K initial size
    str_ = NewArray<byte>(max_);
  }

  ~SnapshotWriter() {
    DeleteArray(str_);
  }

  void GetBytes(byte** str, int* len) {
    *str = NewArray<byte>(len_);
    memcpy(*str, str_, len_);
    *len = len_;
  }

  void Reserve(int bytes, int pos);

  void PutC(char c) {
    InsertC(c, len_);
  }

  void PutInt(int i) {
    InsertInt(i, len_);
  }

  void PutAddress(Address p) {
    PutBytes(reinterpret_cast<byte*>(&p), sizeof(p));
  }

  void PutBytes(const byte* a, int size) {
    InsertBytes(a, len_, size);
  }

  void PutString(const char* s) {
    InsertString(s, len_);
  }

  int InsertC(char c, int pos) {
    Reserve(1, pos);
    str_[pos] = c;
    len_++;
    return pos + 1;
  }

  int InsertInt(int i, int pos) {
    return InsertBytes(reinterpret_cast<byte*>(&i), pos, sizeof(i));
  }

  int InsertBytes(const byte* a, int pos, int size) {
    Reserve(size, pos);
    memcpy(&str_[pos], a, size);
    len_ += size;
    return pos + size;
  }

  int InsertString(const char* s, int pos);

  int length() { return len_; }

  Address position() { return reinterpret_cast<Address>(&str_[len_]); }

 private:
  byte* str_;  // the snapshot
  int len_;    // the current length of str_
  int max_;    // the allocated size of str_
};


void SnapshotWriter::Reserve(int bytes, int pos) {
  CHECK(0 <= pos && pos <= len_);
  while (len_ + bytes >= max_) {
    max_ *= 2;
    byte* old = str_;
    str_ = NewArray<byte>(max_);
    memcpy(str_, old, len_);
    DeleteArray(old);
  }
  if (pos < len_) {
    byte* old = str_;
    str_ = NewArray<byte>(max_);
    memcpy(str_, old, pos);
    memcpy(str_ + pos + bytes, old + pos, len_ - pos);
    DeleteArray(old);
  }
}

int SnapshotWriter::InsertString(const char* s, int pos) {
  int size = strlen(s);
  pos = InsertC('[', pos);
  pos = InsertInt(size, pos);
  pos = InsertC(']', pos);
  return InsertBytes(reinterpret_cast<const byte*>(s), pos, size);
}
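
// The string wire format written by InsertString, as a sketch: a '[' marker,
// the 4-byte length, a ']' marker, then the raw characters. So
//
//   writer->PutString("abc");
//
// appends roughly: '[' 0x03 0x00 0x00 0x00 ']' 'a' 'b' 'c'
// (assuming a little-endian 32-bit int for the length field).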


class ReferenceUpdater: public ObjectVisitor {
 public:
  ReferenceUpdater(HeapObject* obj, Serializer* serializer)
      : obj_address_(obj->address()),
        serializer_(serializer),
        reference_encoder_(serializer->reference_encoder_),
        offsets_(8),
        addresses_(8) {
  }

  virtual void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; ++p) {
      if ((*p)->IsHeapObject()) {
        offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
        Address a = serializer_->GetSavedAddress(HeapObject::cast(*p));
        addresses_.Add(a);
      }
    }
  }

  virtual void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    Address encoded_target = serializer_->GetSavedAddress(target);
    offsets_.Add(rinfo->target_address_address() - obj_address_);
    addresses_.Add(encoded_target);
  }


  virtual void VisitExternalReferences(Address* start, Address* end) {
    for (Address* p = start; p < end; ++p) {
      uint32_t code = reference_encoder_->Encode(*p);
      CHECK(*p == NULL ? code == 0 : code != 0);
      offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
      addresses_.Add(reinterpret_cast<Address>(code));
    }
  }

  virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
    Address target = rinfo->target_address();
    uint32_t encoding = reference_encoder_->Encode(target);
    CHECK(target == NULL ? encoding == 0 : encoding != 0);
    offsets_.Add(rinfo->target_address_address() - obj_address_);
    addresses_.Add(reinterpret_cast<Address>(encoding));
  }

  void Update(Address start_address) {
    for (int i = 0; i < offsets_.length(); i++) {
      memcpy(start_address + offsets_[i], &addresses_[i], sizeof(Address));
    }
  }

 private:
  Address obj_address_;
  Serializer* serializer_;
  ExternalReferenceEncoder* reference_encoder_;
  List<int> offsets_;
  List<Address> addresses_;
};


// Helper functions for a map of encoded heap object addresses.
static uint32_t HeapObjectHash(HeapObject* key) {
  uint32_t low32bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key));
  return low32bits >> 2;
}


static bool MatchHeapObject(void* key1, void* key2) {
  return key1 == key2;
}


Serializer::Serializer()
    : global_handles_(4),
      saved_addresses_(MatchHeapObject) {
  root_ = true;
  roots_ = 0;
  objects_ = 0;
  reference_encoder_ = NULL;
  writer_ = new SnapshotWriter();
  for (int i = 0; i <= LAST_SPACE; i++) {
    allocator_[i] = new SimulatedHeapSpace();
  }
}


Serializer::~Serializer() {
  for (int i = 0; i <= LAST_SPACE; i++) {
    delete allocator_[i];
  }
  if (reference_encoder_) delete reference_encoder_;
  delete writer_;
}


bool Serializer::serialization_enabled_ = false;


#ifdef DEBUG
static const int kMaxTagLength = 32;

void Serializer::Synchronize(const char* tag) {
  if (FLAG_debug_serialization) {
    int length = strlen(tag);
    ASSERT(length <= kMaxTagLength);
    writer_->PutC('S');
    writer_->PutInt(length);
    writer_->PutBytes(reinterpret_cast<const byte*>(tag), length);
  }
}
#endif


void Serializer::InitializeAllocators() {
  for (int i = 0; i <= LAST_SPACE; i++) {
    allocator_[i]->InitEmptyHeap(static_cast<AllocationSpace>(i));
  }
}


bool Serializer::IsVisited(HeapObject* obj) {
  HashMap::Entry* entry =
      saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
  return entry != NULL;
}


Address Serializer::GetSavedAddress(HeapObject* obj) {
  HashMap::Entry* entry =
      saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
  ASSERT(entry != NULL);
  return reinterpret_cast<Address>(entry->value);
}


void Serializer::SaveAddress(HeapObject* obj, Address addr) {
  HashMap::Entry* entry =
      saved_addresses_.Lookup(obj, HeapObjectHash(obj), true);
  entry->value = addr;
}


void Serializer::Serialize() {
  // No active threads.
  CHECK_EQ(NULL, ThreadState::FirstInUse());
  // No active or weak handles.
  CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
  CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
  // We need a counter function during serialization to resolve the
  // references to counters in the code on the heap.
  CHECK(StatsTable::HasCounterFunction());
  CHECK(enabled());
  InitializeAllocators();
  reference_encoder_ = new ExternalReferenceEncoder();
  PutHeader();
  Heap::IterateRoots(this);
  PutLog();
  PutContextStack();
  Disable();
}


void Serializer::Finalize(byte** str, int* len) {
  writer_->GetBytes(str, len);
}


// Serialize objects by writing them into the stream.

void Serializer::VisitPointers(Object** start, Object** end) {
  bool root = root_;
  root_ = false;
  for (Object** p = start; p < end; ++p) {
    bool serialized;
    Address a = Encode(*p, &serialized);
    if (root) {
      roots_++;
      // If the object was not just serialized,
      // write its encoded address instead.
      if (!serialized) PutEncodedAddress(a);
    }
  }
  root_ = root;
}


void Serializer::VisitCodeTarget(RelocInfo* rinfo) {
  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
  bool serialized;
  Encode(target, &serialized);
}


class GlobalHandlesRetriever: public ObjectVisitor {
 public:
  explicit GlobalHandlesRetriever(List<Object**>* handles)
      : global_handles_(handles) {}

  virtual void VisitPointers(Object** start, Object** end) {
    for (; start != end; ++start) {
      global_handles_->Add(start);
    }
  }

 private:
  List<Object**>* global_handles_;
};


void Serializer::PutFlags() {
  writer_->PutC('F');
  List<const char*>* argv = FlagList::argv();
  writer_->PutInt(argv->length());
  writer_->PutC('[');
  for (int i = 0; i < argv->length(); i++) {
    if (i > 0) writer_->PutC('|');
    writer_->PutString((*argv)[i]);
    DeleteArray((*argv)[i]);
  }
  writer_->PutC(']');
  flags_end_ = writer_->length();
  delete argv;
}


void Serializer::PutHeader() {
  PutFlags();
  writer_->PutC('D');
#ifdef DEBUG
  writer_->PutC(FLAG_debug_serialization ? '1' : '0');
#else
  writer_->PutC('0');
#endif
#ifdef V8_NATIVE_REGEXP
  writer_->PutC('N');
#else  // Interpreted regexp
  writer_->PutC('I');
#endif
  // Write sizes of paged memory spaces. Allocate extra space for the old
  // and code spaces, because objects in new space will be promoted to them.
  writer_->PutC('S');
  writer_->PutC('[');
  writer_->PutInt(Heap::old_pointer_space()->Size() +
                  Heap::new_space()->Size());
  writer_->PutC('|');
  writer_->PutInt(Heap::old_data_space()->Size() + Heap::new_space()->Size());
  writer_->PutC('|');
  writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
  writer_->PutC('|');
  writer_->PutInt(Heap::map_space()->Size());
  writer_->PutC('|');
  writer_->PutInt(Heap::cell_space()->Size());
  writer_->PutC(']');
  // Write global handles.
  writer_->PutC('G');
  writer_->PutC('[');
  GlobalHandlesRetriever ghr(&global_handles_);
  GlobalHandles::IterateRoots(&ghr);
  for (int i = 0; i < global_handles_.length(); i++) {
    writer_->PutC('N');
  }
  writer_->PutC(']');
}
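
// The resulting header layout, as a sketch of what PutHeader() emits and
// GetHeader() below consumes:
//
//   F <argc> [ flag|flag|... ]      command-line flags
//   D <'0' or '1'>                  debug synchronization tags present?
//   N or I                          native vs. interpreted regexp
//   S [ size|size|size|size|size ]  paged-space capacities
//   G [ NNN... ]                    one 'N' per global handle
//
// Markers are single chars; the <...> fields are 4-byte ints from PutInt().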


void Serializer::PutLog() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_code) {
    Logger::TearDown();
    int pos = writer_->InsertC('L', flags_end_);
    bool exists;
    Vector<const char> log = ReadFile(FLAG_logfile, &exists);
    writer_->InsertString(log.start(), pos);
    log.Dispose();
  }
#endif
}


static int IndexOf(const List<Object**>& list, Object** element) {
  for (int i = 0; i < list.length(); i++) {
    if (list[i] == element) return i;
  }
  return -1;
}


void Serializer::PutGlobalHandleStack(const List<Handle<Object> >& stack) {
  writer_->PutC('[');
  writer_->PutInt(stack.length());
  for (int i = stack.length() - 1; i >= 0; i--) {
    writer_->PutC('|');
    int gh_index = IndexOf(global_handles_, stack[i].location());
    CHECK_GE(gh_index, 0);
    writer_->PutInt(gh_index);
  }
  writer_->PutC(']');
}


void Serializer::PutContextStack() {
  List<Context*> contexts(2);
  while (HandleScopeImplementer::instance()->HasSavedContexts()) {
    Context* context =
        HandleScopeImplementer::instance()->RestoreContext();
    contexts.Add(context);
  }
  for (int i = contexts.length() - 1; i >= 0; i--) {
    HandleScopeImplementer::instance()->SaveContext(contexts[i]);
  }
  writer_->PutC('C');
  writer_->PutC('[');
  writer_->PutInt(contexts.length());
  if (!contexts.is_empty()) {
    Object** start = reinterpret_cast<Object**>(&contexts.first());
    VisitPointers(start, start + contexts.length());
  }
  writer_->PutC(']');
}


void Serializer::PutEncodedAddress(Address addr) {
  writer_->PutC('P');
  writer_->PutAddress(addr);
}


Address Serializer::Encode(Object* o, bool* serialized) {
  *serialized = false;
  if (o->IsSmi()) {
    return reinterpret_cast<Address>(o);
  } else {
    HeapObject* obj = HeapObject::cast(o);
    if (IsVisited(obj)) {
      return GetSavedAddress(obj);
    } else {
      // First visit: serialize the object.
      *serialized = true;
      return PutObject(obj);
    }
  }
}


Address Serializer::PutObject(HeapObject* obj) {
  Map* map = obj->map();
  InstanceType type = map->instance_type();
  int size = obj->SizeFromMap(map);

  // Simulate the allocation of obj to predict where it will be
  // allocated during deserialization.
  Address addr = Allocate(obj).Encode();

  SaveAddress(obj, addr);

  if (type == CODE_TYPE) {
    LOG(CodeMoveEvent(obj->address(), addr));
  }

  // Write out the object prologue: type, size, and simulated address of obj.
  writer_->PutC('[');
  CHECK_EQ(0, static_cast<int>(size & kObjectAlignmentMask));
  writer_->PutInt(type);
  writer_->PutInt(size >> kObjectAlignmentBits);
  PutEncodedAddress(addr);  // encodes AllocationSpace

  // Visit all the pointers in the object other than the map. This
  // will recursively serialize any as-yet-unvisited objects.
  obj->Iterate(this);

  // Mark end of recursively embedded objects, start of object body.
  writer_->PutC('|');
  // Write out the raw contents of the object. No compression, but
  // fast to deserialize.
  writer_->PutBytes(obj->address(), size);
  // Update pointers and external references in the written object.
  ReferenceUpdater updater(obj, this);
  obj->Iterate(&updater);
  updater.Update(writer_->position() - size);

#ifdef DEBUG
  if (FLAG_debug_serialization) {
    // Write out the object epilogue to catch synchronization errors.
    PutEncodedAddress(addr);
    writer_->PutC(']');
  }
#endif

  objects_++;
  return addr;
}
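
// Per-object wire format, as a sketch of what PutObject() emits and
// Deserializer::GetObject() below consumes:
//
//   [ <type> <size_in_words> P<encoded address>
//     ...records, in this same format, for any not-yet-serialized
//        objects this object references...
//   | <raw object bytes, with pointers rewritten to encoded addresses>
//
// plus, in debug-serialization builds, a trailing P<encoded address> ']'
// epilogue used to catch synchronization errors.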


RelativeAddress Serializer::Allocate(HeapObject* obj) {
  // Find out which AllocationSpace 'obj' is in.
  AllocationSpace s;
  bool found = false;
  for (int i = FIRST_SPACE; !found && i <= LAST_SPACE; i++) {
    s = static_cast<AllocationSpace>(i);
    found = Heap::InSpace(obj, s);
  }
  CHECK(found);
  int size = obj->Size();
  if (s == NEW_SPACE) {
    if (size > Heap::MaxObjectSizeInPagedSpace()) {
      s = LO_SPACE;
    } else {
      OldSpace* space = Heap::TargetSpace(obj);
      ASSERT(space == Heap::old_pointer_space() ||
             space == Heap::old_data_space());
      s = (space == Heap::old_pointer_space()) ?
          OLD_POINTER_SPACE :
          OLD_DATA_SPACE;
    }
  }
  GCTreatment gc_treatment = DataObject;
  if (obj->IsFixedArray()) {
    gc_treatment = PointerObject;
  } else if (obj->IsCode()) {
    gc_treatment = CodeObject;
  }
  return allocator_[s]->Allocate(size, gc_treatment);
}


//------------------------------------------------------------------------------
// Implementation of Deserializer


static const int kInitArraySize = 32;


Deserializer::Deserializer(const byte* str, int len)
    : reader_(str, len),
      map_pages_(kInitArraySize),
      cell_pages_(kInitArraySize),
      old_pointer_pages_(kInitArraySize),
      old_data_pages_(kInitArraySize),
      code_pages_(kInitArraySize),
      large_objects_(kInitArraySize),
      global_handles_(4) {
  root_ = true;
  roots_ = 0;
  objects_ = 0;
  reference_decoder_ = NULL;
#ifdef DEBUG
  expect_debug_information_ = false;
#endif
}


Deserializer::~Deserializer() {
  if (reference_decoder_) delete reference_decoder_;
}


void Deserializer::ExpectEncodedAddress(Address expected) {
  Address a = GetEncodedAddress();
  USE(a);
  ASSERT(a == expected);
}


#ifdef DEBUG
void Deserializer::Synchronize(const char* tag) {
  if (expect_debug_information_) {
    char buf[kMaxTagLength];
    reader_.ExpectC('S');
    int length = reader_.GetInt();
    ASSERT(length <= kMaxTagLength);
    reader_.GetBytes(reinterpret_cast<Address>(buf), length);
    ASSERT_EQ(strlen(tag), length);
    ASSERT(strncmp(tag, buf, length) == 0);
  }
}
#endif


void Deserializer::Deserialize() {
  // No active threads.
  ASSERT_EQ(NULL, ThreadState::FirstInUse());
  // No active handles.
  ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
  reference_decoder_ = new ExternalReferenceDecoder();
  // By setting linear allocation only, we forbid the use of free list
  // allocation which is not predicted by SimulatedAddress.
  GetHeader();
  Heap::IterateRoots(this);
  GetContextStack();
}


void Deserializer::VisitPointers(Object** start, Object** end) {
  bool root = root_;
  root_ = false;
  for (Object** p = start; p < end; ++p) {
    if (root) {
      roots_++;
      // Read the next object or pointer from the stream.
      int c = reader_.GetC();
      if (c == '[') {
        *p = GetObject();  // embedded object
      } else {
        ASSERT(c == 'P');  // pointer to previously serialized object
        *p = Resolve(reader_.GetAddress());
      }
    } else {
      // A pointer internal to a HeapObject that we've already
      // read: resolve it to a true address (or Smi).
      *p = Resolve(reinterpret_cast<Address>(*p));
    }
  }
  root_ = root;
}


void Deserializer::VisitCodeTarget(RelocInfo* rinfo) {
  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
  Address encoded_address = reinterpret_cast<Address>(rinfo->target_object());
  Code* target_object = reinterpret_cast<Code*>(Resolve(encoded_address));
  rinfo->set_target_address(target_object->instruction_start());
}


void Deserializer::VisitExternalReferences(Address* start, Address* end) {
  for (Address* p = start; p < end; ++p) {
    uint32_t code = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*p));
    *p = reference_decoder_->Decode(code);
  }
}


void Deserializer::VisitRuntimeEntry(RelocInfo* rinfo) {
  uint32_t* pc = reinterpret_cast<uint32_t*>(rinfo->target_address_address());
  uint32_t encoding = *pc;
  Address target = reference_decoder_->Decode(encoding);
  rinfo->set_target_address(target);
}


void Deserializer::GetFlags() {
  reader_.ExpectC('F');
  int argc = reader_.GetInt() + 1;
  char** argv = NewArray<char*>(argc);
  reader_.ExpectC('[');
  for (int i = 1; i < argc; i++) {
    if (i > 1) reader_.ExpectC('|');
    argv[i] = reader_.GetString();
  }
  reader_.ExpectC(']');
  has_log_ = false;
  for (int i = 1; i < argc; i++) {
    if (strcmp("--log_code", argv[i]) == 0) {
      has_log_ = true;
    } else if (strcmp("--nouse_ic", argv[i]) == 0) {
      FLAG_use_ic = false;
    } else if (strcmp("--debug_code", argv[i]) == 0) {
      FLAG_debug_code = true;
    } else if (strcmp("--nolazy", argv[i]) == 0) {
      FLAG_lazy = false;
    }
    DeleteArray(argv[i]);
  }

  DeleteArray(argv);
}


void Deserializer::GetLog() {
  if (has_log_) {
    reader_.ExpectC('L');
    char* snapshot_log = reader_.GetString();
#ifdef ENABLE_LOGGING_AND_PROFILING
    if (FLAG_log_code) {
      LOG(Preamble(snapshot_log));
    }
#endif
    DeleteArray(snapshot_log);
  }
}


static void InitPagedSpace(PagedSpace* space,
                           int capacity,
                           List<Page*>* page_list) {
  if (!space->EnsureCapacity(capacity)) {
    V8::FatalProcessOutOfMemory("InitPagedSpace");
  }
  PageIterator it(space, PageIterator::ALL_PAGES);
  while (it.has_next()) page_list->Add(it.next());
}


void Deserializer::GetHeader() {
  reader_.ExpectC('D');
#ifdef DEBUG
  expect_debug_information_ = reader_.GetC() == '1';
#else
  // In release mode, don't attempt to read a snapshot containing
  // synchronization tags.
  if (reader_.GetC() != '0') FATAL("Snapshot contains synchronization tags.");
#endif
#ifdef V8_NATIVE_REGEXP
  reader_.ExpectC('N');
#else  // Interpreted regexp.
  reader_.ExpectC('I');
#endif
  // Ensure sufficient capacity in paged memory spaces to avoid growth
  // during deserialization.
  reader_.ExpectC('S');
  reader_.ExpectC('[');
  InitPagedSpace(Heap::old_pointer_space(),
                 reader_.GetInt(),
                 &old_pointer_pages_);
  reader_.ExpectC('|');
  InitPagedSpace(Heap::old_data_space(), reader_.GetInt(), &old_data_pages_);
  reader_.ExpectC('|');
  InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
  reader_.ExpectC('|');
  InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_);
  reader_.ExpectC('|');
  InitPagedSpace(Heap::cell_space(), reader_.GetInt(), &cell_pages_);
  reader_.ExpectC(']');
  // Create placeholders for global handles, to be filled in during
  // IterateRoots.
  reader_.ExpectC('G');
  reader_.ExpectC('[');
  int c = reader_.GetC();
  while (c != ']') {
    ASSERT(c == 'N');
    global_handles_.Add(GlobalHandles::Create(NULL).location());
    c = reader_.GetC();
  }
}


void Deserializer::GetGlobalHandleStack(List<Handle<Object> >* stack) {
  reader_.ExpectC('[');
  int length = reader_.GetInt();
  for (int i = 0; i < length; i++) {
    reader_.ExpectC('|');
    int gh_index = reader_.GetInt();
    stack->Add(global_handles_[gh_index]);
  }
  reader_.ExpectC(']');
}


void Deserializer::GetContextStack() {
  reader_.ExpectC('C');
  CHECK_EQ(reader_.GetC(), '[');
  int count = reader_.GetInt();
  List<Context*> entered_contexts(count);
  if (count > 0) {
    Object** start = reinterpret_cast<Object**>(&entered_contexts.first());
    VisitPointers(start, start + count);
  }
  reader_.ExpectC(']');
  for (int i = 0; i < count; i++) {
    HandleScopeImplementer::instance()->SaveContext(entered_contexts[i]);
  }
}


Address Deserializer::GetEncodedAddress() {
  reader_.ExpectC('P');
  return reader_.GetAddress();
}


Object* Deserializer::GetObject() {
  // Read the prologue: type, size and encoded address.
  InstanceType type = static_cast<InstanceType>(reader_.GetInt());
  int size = reader_.GetInt() << kObjectAlignmentBits;
  Address a = GetEncodedAddress();

  // Get a raw object of the right size in the right space.
  AllocationSpace space = GetSpace(a);
  Object* o;
  if (IsLargeExecutableObject(a)) {
    o = Heap::lo_space()->AllocateRawCode(size);
  } else if (IsLargeFixedArray(a)) {
    o = Heap::lo_space()->AllocateRawFixedArray(size);
  } else {
    AllocationSpace retry_space = (space == NEW_SPACE)
        ? Heap::TargetSpaceId(type)
        : space;
    o = Heap::AllocateRaw(size, space, retry_space);
  }
  ASSERT(!o->IsFailure());
  // Check that the simulation of heap allocation was correct.
  ASSERT(o == Resolve(a));

  // Read any recursively embedded objects.
  int c = reader_.GetC();
  while (c == '[') {
    GetObject();
    c = reader_.GetC();
  }
  ASSERT(c == '|');

  HeapObject* obj = reinterpret_cast<HeapObject*>(o);
  // Read the uninterpreted contents of the object after the map.
  reader_.GetBytes(obj->address(), size);
#ifdef DEBUG
  if (expect_debug_information_) {
    // Read in the epilogue to check that we're still synchronized.
    ExpectEncodedAddress(a);
    reader_.ExpectC(']');
  }
#endif

  // Resolve the encoded pointers we just read in.
  // Same as obj->Iterate(this), but doesn't rely on the map pointer being set.
  VisitPointer(reinterpret_cast<Object**>(obj->address()));
  obj->IterateBody(type, size, this);

  if (type == CODE_TYPE) {
    LOG(CodeMoveEvent(a, obj->address()));
  }
  objects_++;
  return o;
}


static inline Object* ResolvePaged(int page_index,
                                   int page_offset,
                                   PagedSpace* space,
                                   List<Page*>* page_list) {
  ASSERT(page_index < page_list->length());
  Address address = (*page_list)[page_index]->OffsetToAddress(page_offset);
  return HeapObject::FromAddress(address);
}


template<typename T>
void ConcatReversed(List<T>* target, const List<T>& source) {
  for (int i = source.length() - 1; i >= 0; i--) {
    target->Add(source[i]);
  }
}


Object* Deserializer::Resolve(Address encoded) {
  Object* o = reinterpret_cast<Object*>(encoded);
  if (o->IsSmi()) return o;

  // Encoded addresses of HeapObjects always have 'HeapObject' tags.
  ASSERT(o->IsHeapObject());

  switch (GetSpace(encoded)) {
    // For Map space and Old space, we cache the known Pages in map_pages,
    // old_pointer_pages and old_data_pages. Even though MapSpace keeps a list
    // of page addresses, we don't rely on it since GetObject uses AllocateRaw,
    // and that appears not to update the page list.
    case MAP_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::map_space(), &map_pages_);
    case CELL_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::cell_space(), &cell_pages_);
    case OLD_POINTER_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::old_pointer_space(), &old_pointer_pages_);
    case OLD_DATA_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::old_data_space(), &old_data_pages_);
    case CODE_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::code_space(), &code_pages_);
    case NEW_SPACE:
      return HeapObject::FromAddress(Heap::NewSpaceStart() +
                                     NewSpaceOffset(encoded));
    case LO_SPACE:
      // Cache the known large_objects, allocated one per 'page'.
      int index = LargeObjectIndex(encoded);
      if (index >= large_objects_.length()) {
        int new_object_count =
            Heap::lo_space()->PageCount() - large_objects_.length();
        List<Object*> new_objects(new_object_count);
        LargeObjectIterator it(Heap::lo_space());
        for (int i = 0; i < new_object_count; i++) {
          new_objects.Add(it.next());
        }
#ifdef DEBUG
        for (int i = large_objects_.length() - 1; i >= 0; i--) {
          ASSERT(it.next() == large_objects_[i]);
        }
#endif
        ConcatReversed(&large_objects_, new_objects);
        ASSERT(index < large_objects_.length());
      }
      return large_objects_[index];  // The page offset is ignored.
  }
  UNREACHABLE();
  return NULL;
}


} }  // namespace v8::internal