// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SERIALIZE_H_
#define V8_SERIALIZE_H_

#include "hashmap.h"

namespace v8 {
namespace internal {

// A TypeCode is used to distinguish different kinds of external reference.
// In an encoded external reference the type is stored in the bits above
// kReferenceIdBits and the per-type id in the low bits (see the constants and
// ExternalReferenceDecoder::Lookup below).
enum TypeCode {
  UNCLASSIFIED,  // One-of-a-kind references.
  BUILTIN,
  RUNTIME_FUNCTION,
  IC_UTILITY,
  DEBUG_ADDRESS,
  STATS_COUNTER,
  TOP_ADDRESS,
  C_BUILTIN,
  EXTENSION,
  ACCESSOR,
  RUNTIME_ENTRY,
  STUB_CACHE_TABLE
};

const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
const int kFirstTypeCode = UNCLASSIFIED;

const int kReferenceIdBits = 16;
const int kReferenceIdMask = (1 << kReferenceIdBits) - 1;
const int kReferenceTypeShift = kReferenceIdBits;
const int kDebugRegisterBits = 4;
const int kDebugIdShift = kDebugRegisterBits;


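// Maps the addresses of external references (builtins, runtime functions,
// counters and the other TypeCode kinds) to compact 32-bit keys, a type code
// plus a per-type id, that can be written into a snapshot and turned back
// into addresses by ExternalReferenceDecoder.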
class ExternalReferenceEncoder {
 public:
  ExternalReferenceEncoder();

  uint32_t Encode(Address key) const;

  const char* NameOfAddress(Address key) const;

 private:
  HashMap encodings_;
  static uint32_t Hash(Address key) {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >> 2);
  }

  int IndexOf(Address key) const;

  static bool Match(void* key1, void* key2) { return key1 == key2; }

  void Put(Address key, int index);
};


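// The inverse of ExternalReferenceEncoder: maps the 32-bit keys found in a
// snapshot back to addresses, using a table indexed by type code and id.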
class ExternalReferenceDecoder {
 public:
  ExternalReferenceDecoder();
  ~ExternalReferenceDecoder();

  Address Decode(uint32_t key) const {
    if (key == 0) return NULL;
    return *Lookup(key);
  }

 private:
  Address** encodings_;

  Address* Lookup(uint32_t key) const {
    int type = key >> kReferenceTypeShift;
    ASSERT(kFirstTypeCode <= type && type < kTypeCodeCount);
    int id = key & kReferenceIdMask;
    return &encodings_[type][id];
  }

  void Put(uint32_t key, Address value) {
    *Lookup(key) = value;
  }
};


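// The stream of bytes from which a Deserializer reads a snapshot.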
class SnapshotByteSource {
 public:
  SnapshotByteSource(const byte* array, int length)
      : data_(array), length_(length), position_(0) { }

  bool HasMore() { return position_ < length_; }

  int Get() {
    ASSERT(position_ < length_);
    return data_[position_++];
  }

  inline void CopyRaw(byte* to, int number_of_bytes);

  inline int GetInt();

  bool AtEOF() {
    return position_ == length_;
  }

  int position() { return position_; }

 private:
  const byte* data_;
  int length_;
  int position_;
};


// It is very common to have a reference to objects at certain offsets in the
// heap. These offsets have been determined experimentally. We code
// references to such objects in a single byte that encodes the way the pointer
// is written (only plain pointers allowed), the space number and the offset.
// This only works for objects in the first page of a space. Don't use this for
// things in new space, since it bypasses the write barrier.

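// One on 64-bit targets and zero on 32-bit targets; used below to adjust
// offsets that differ between the two pointer sizes.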
static const int k64 = (sizeof(uintptr_t) - 4) / 4;

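// Each f(...) entry below is (index, space number, offset). The offsets were
// determined experimentally (see the comment above) and the k64 terms
// compensate for the different object layouts on 64-bit targets.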
#define COMMON_REFERENCE_PATTERNS(f)                 \
  f(kNumberOfSpaces, 2, (11 - k64))                  \
  f((kNumberOfSpaces + 1), 2, 0)                     \
  f((kNumberOfSpaces + 2), 2, (142 - 16 * k64))      \
  f((kNumberOfSpaces + 3), 2, (74 - 15 * k64))       \
  f((kNumberOfSpaces + 4), 2, 5)                     \
  f((kNumberOfSpaces + 5), 1, 135)                   \
  f((kNumberOfSpaces + 6), 2, (228 - 39 * k64))

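// Each f(...) entry pairs a tag offset with a raw data length in bytes. These
// are the common raw lengths that get dedicated tags 0x31-0x3f (see kRawData
// in SerializerDeserializer below).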
#define COMMON_RAW_LENGTHS(f)   \
  f(1, 1)   \
  f(2, 2)   \
  f(3, 3)   \
  f(4, 4)   \
  f(5, 5)   \
  f(6, 6)   \
  f(7, 7)   \
  f(8, 8)   \
  f(9, 12)  \
  f(10, 16) \
  f(11, 20) \
  f(12, 24) \
  f(13, 28) \
  f(14, 32) \
  f(15, 36)

// The SerializerDeserializer class is the common superclass of Serializer and
// Deserializer; it holds the constants and methods used by both.
class SerializerDeserializer: public ObjectVisitor {
 public:
  static void Iterate(ObjectVisitor* visitor);
  static void SetSnapshotCacheSize(int size);

 protected:
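  // A tag byte describing a pointer combines one value from each of the three
  // enums below; note that their bit masks do not overlap.
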
  // Where the pointed-to object can be found:
  enum Where {
    kNewObject = 0,               // Object is next in snapshot.
    // 1-8                           One per space.
    kRootArray = 0x9,             // Object is found in root array.
    kPartialSnapshotCache = 0xa,  // Object is in the cache.
    kExternalReference = 0xb,     // Pointer to an external reference.
    // 0xc-0xf                       Free.
    kBackref = 0x10,              // Object is described relative to end.
    // 0x11-0x18                     One per space.
    // 0x19-0x1f                     Common backref offsets.
    kFromStart = 0x20,            // Object is described relative to start.
    // 0x21-0x28                     One per space.
    // 0x29-0x2f                     Free.
    // 0x30-0x3f                     Used by misc tags below.
    kPointedToMask = 0x3f
  };

  // How to code the pointer to the object.
  enum HowToCode {
    kPlain = 0,                   // Straight pointer.
    // What this means depends on the architecture:
    kFromCode = 0x40,             // A pointer inlined in code.
    kHowToCodeMask = 0x40
  };

  // Where to point within the object.
  enum WhereToPoint {
    kStartOfObject = 0,
    kFirstInstruction = 0x80,
    kWhereToPointMask = 0x80
  };

  // Misc.
  // Raw data to be copied from the snapshot.
  static const int kRawData = 0x30;
  // Some common raw lengths: 0x31-0x3f
  // A tag emitted at strategic points in the snapshot to delineate sections.
  // If the deserializer does not find these at the expected moments then it
  // is an indication that the snapshot and the VM do not fit together.
  // Examine the build process for architecture, version or configuration
  // mismatches.
  static const int kSynchronize = 0x70;
  // Used for the source code of the natives, which is in the executable, but
  // is referred to from external strings in the snapshot.
  static const int kNativesStringResource = 0x71;
  static const int kNewPage = 0x72;
  // 0x73-0x7f Free.
  // 0xb0-0xbf Free.
  // 0xf0-0xff Free.


  static const int kLargeData = LAST_SPACE;
  static const int kLargeCode = kLargeData + 1;
  static const int kLargeFixedArray = kLargeCode + 1;
  static const int kNumberOfSpaces = kLargeFixedArray + 1;
  static const int kAnyOldSpace = -1;

  // A bitmask for getting the space out of an instruction.
  static const int kSpaceMask = 15;

  static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
  static inline bool SpaceIsPaged(int space) {
    return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
  }

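  // The cache of objects that partial snapshots refer to by index. It is
  // written out as part of the startup snapshot and repopulated when a
  // partial snapshot is created (see StartupSerializer).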
  static int partial_snapshot_cache_length_;
  static const int kPartialSnapshotCacheCapacity = 1300;
  static Object* partial_snapshot_cache_[];
};


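// Integers are stored as 7-bit groups, most significant group first; every
// byte except the last has its top bit set. For example, the byte sequence
// 0xa4 0x34 decodes to 0x1234.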
int SnapshotByteSource::GetInt() {
  // The first byte is handled outside the loop so that really small ints are
  // returned quickly.
  int snapshot_byte = Get();
  if ((snapshot_byte & 0x80) == 0) {
    return snapshot_byte;
  }
  int accumulator = (snapshot_byte & 0x7f) << 7;
  while (true) {
    snapshot_byte = Get();
    if ((snapshot_byte & 0x80) == 0) {
      return accumulator | snapshot_byte;
    }
    accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
  }
  UNREACHABLE();
  return accumulator;
}


void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
  memcpy(to, data_ + position_, number_of_bytes);
  position_ += number_of_bytes;
}


// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
class Deserializer: public SerializerDeserializer {
 public:
  // Create a deserializer from a snapshot byte source.
  explicit Deserializer(SnapshotByteSource* source);

  virtual ~Deserializer();

  // Deserialize the snapshot into an empty heap.
  void Deserialize();

  // Deserialize a single object and the objects reachable from it.
  void DeserializePartial(Object** root);

#ifdef DEBUG
  virtual void Synchronize(const char* tag);
#endif

 private:
  virtual void VisitPointers(Object** start, Object** end);

  virtual void VisitExternalReferences(Address* start, Address* end) {
    UNREACHABLE();
  }

  virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
    UNREACHABLE();
  }

  void ReadChunk(Object** start, Object** end, int space, Address address);
  HeapObject* GetAddressFromStart(int space);
  inline HeapObject* GetAddressFromEnd(int space);
  Address Allocate(int space_number, Space* space, int size);
  void ReadObject(int space_number, Space* space, Object** write_back);

  // Keep track of the pages in the paged spaces.
  // (In large object space we are keeping track of individual objects
  // rather than pages.) In new space we just need the address of the
  // first object and the others will flow from that.
  List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];

  SnapshotByteSource* source_;
  static ExternalReferenceDecoder* external_reference_decoder_;
  // This is the address of the next object that will be allocated in each
  // space. It is used to calculate the addresses of back-references.
  Address high_water_[LAST_SPACE + 1];
  // This is the address of the most recent object that was allocated. It
  // is used to set the location of the new page when we encounter a
  // START_NEW_PAGE_SERIALIZATION tag.
  Address last_object_address_;

  DISALLOW_COPY_AND_ASSIGN(Deserializer);
};


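// An abstract sink to which the serializer writes the bytes of a snapshot;
// the counterpart of SnapshotByteSource on the reading side.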
class SnapshotByteSink {
 public:
  virtual ~SnapshotByteSink() { }
  virtual void Put(int byte, const char* description) = 0;
  virtual void PutSection(int byte, const char* description) {
    Put(byte, description);
  }
  void PutInt(uintptr_t integer, const char* description);
  virtual int Position() = 0;
};


// Mapping objects to their location after deserialization.
// This is used during building, but not at runtime by V8.
class SerializationAddressMapper {
 public:
  SerializationAddressMapper()
      : serialization_map_(new HashMap(&SerializationMatchFun)),
        no_allocation_(new AssertNoAllocation()) { }

  ~SerializationAddressMapper() {
    delete serialization_map_;
    delete no_allocation_;
  }

  bool IsMapped(HeapObject* obj) {
    return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
  }

  int MappedTo(HeapObject* obj) {
    ASSERT(IsMapped(obj));
    return static_cast<int>(reinterpret_cast<intptr_t>(
        serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
  }

  void AddMapping(HeapObject* obj, int to) {
    ASSERT(!IsMapped(obj));
    HashMap::Entry* entry =
        serialization_map_->Lookup(Key(obj), Hash(obj), true);
    entry->value = Value(to);
  }

 private:
  static bool SerializationMatchFun(void* key1, void* key2) {
    return key1 == key2;
  }

  static uint32_t Hash(HeapObject* obj) {
    return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
  }

  static void* Key(HeapObject* obj) {
    return reinterpret_cast<void*>(obj->address());
  }

  static void* Value(int v) {
    return reinterpret_cast<void*>(v);
  }

  HashMap* serialization_map_;
  AssertNoAllocation* no_allocation_;
  DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
};


class Serializer : public SerializerDeserializer {
 public:
  explicit Serializer(SnapshotByteSink* sink);
  ~Serializer();
  void VisitPointers(Object** start, Object** end);
  // You can call this after serialization to find out how much space was used
  // in each space.
  int CurrentAllocationAddress(int space) {
    if (SpaceIsLarge(space)) return large_object_total_;
    return fullness_[space];
  }

  static void Enable() {
    if (!serialization_enabled_) {
      ASSERT(!too_late_to_enable_now_);
    }
    serialization_enabled_ = true;
  }

  static void Disable() { serialization_enabled_ = false; }
  // Call this when you have made use of the fact that there is no
  // serialization going on.
  static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
  static bool enabled() { return serialization_enabled_; }
  SerializationAddressMapper* address_mapper() { return &address_mapper_; }
#ifdef DEBUG
  virtual void Synchronize(const char* tag);
#endif

 protected:
  static const int kInvalidRootIndex = -1;
  virtual int RootIndex(HeapObject* heap_object) = 0;
  virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;

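  // Serializes a single heap object: visits the pointers, external references
  // and relocation entries it contains and writes them to the sink in the
  // representation chosen by how_to_code and where_to_point.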
  class ObjectSerializer : public ObjectVisitor {
   public:
    ObjectSerializer(Serializer* serializer,
                     Object* o,
                     SnapshotByteSink* sink,
                     HowToCode how_to_code,
                     WhereToPoint where_to_point)
        : serializer_(serializer),
          object_(HeapObject::cast(o)),
          sink_(sink),
          reference_representation_(how_to_code + where_to_point),
          bytes_processed_so_far_(0) { }
    void Serialize();
    void VisitPointers(Object** start, Object** end);
    void VisitExternalReferences(Address* start, Address* end);
    void VisitCodeTarget(RelocInfo* target);
    void VisitRuntimeEntry(RelocInfo* reloc);
    // Used for serializing the external strings that hold the natives source.
    void VisitExternalAsciiString(
        v8::String::ExternalAsciiStringResource** resource);
    // We can't serialize a heap with external two byte strings.
    void VisitExternalTwoByteString(
        v8::String::ExternalStringResource** resource) {
      UNREACHABLE();
    }

   private:
    void OutputRawData(Address up_to);

    Serializer* serializer_;
    HeapObject* object_;
    SnapshotByteSink* sink_;
    int reference_representation_;
    int bytes_processed_so_far_;
  };

  virtual void SerializeObject(Object* o,
                               HowToCode how_to_code,
                               WhereToPoint where_to_point) = 0;
  void SerializeReferenceToPreviousObject(
      int space,
      int address,
      HowToCode how_to_code,
      WhereToPoint where_to_point);
  void InitializeAllocators();
  // This will return the space for an object. If the object is in large
  // object space it may return kLargeCode or kLargeFixedArray in order
  // to indicate to the deserializer what kind of large object allocation
  // to make.
  static int SpaceOfObject(HeapObject* object);
  // This just returns the space of the object. It will return LO_SPACE
  // for all large objects since you can't check the type of the object
  // once the map has been used for the serialization address.
  static int SpaceOfAlreadySerializedObject(HeapObject* object);
  int Allocate(int space, int size, bool* new_page_started);
  int EncodeExternalReference(Address addr) {
    return external_reference_encoder_->Encode(addr);
  }

  // Keep track of the fullness of each space in order to generate
  // relative addresses for back references. Large objects are
  // just numbered sequentially since relative addresses make no
  // sense in large object space.
  int fullness_[LAST_SPACE + 1];
  SnapshotByteSink* sink_;
  int current_root_index_;
  ExternalReferenceEncoder* external_reference_encoder_;
  static bool serialization_enabled_;
  // Did we already make use of the fact that serialization was not enabled?
  static bool too_late_to_enable_now_;
  int large_object_total_;
  SerializationAddressMapper address_mapper_;

  friend class ObjectSerializer;
  friend class Deserializer;

  DISALLOW_COPY_AND_ASSIGN(Serializer);
};


class PartialSerializer : public Serializer {
 public:
  PartialSerializer(Serializer* startup_snapshot_serializer,
                    SnapshotByteSink* sink)
      : Serializer(sink),
        startup_serializer_(startup_snapshot_serializer) {
  }

  // Serialize the objects reachable from a single object pointer.
  virtual void Serialize(Object** o);
  virtual void SerializeObject(Object* o,
                               HowToCode how_to_code,
                               WhereToPoint where_to_point);

 protected:
  virtual int RootIndex(HeapObject* o);
  virtual int PartialSnapshotCacheIndex(HeapObject* o);
  virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
    // Scripts should be referred to only through shared function infos. We
    // can't allow them to be part of the partial snapshot because they
    // contain a unique ID, and deserializing several partial snapshots
    // containing scripts would cause duplicates.
    ASSERT(!o->IsScript());
    return o->IsString() || o->IsSharedFunctionInfo() ||
           o->IsHeapNumber() || o->IsCode();
  }

 private:
  Serializer* startup_serializer_;
  DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};


class StartupSerializer : public Serializer {
 public:
  explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
    // Clear the cache of objects used by the partial snapshot. After the
    // strong roots have been serialized we can create a partial snapshot
    // which will repopulate the cache with objects needed by that partial
    // snapshot.
    partial_snapshot_cache_length_ = 0;
  }
  // Serialize the current state of the heap. The order is:
  // 1) Strong references.
  // 2) Partial snapshot cache.
  // 3) Weak references (e.g. the symbol table).
  virtual void SerializeStrongReferences();
  virtual void SerializeObject(Object* o,
                               HowToCode how_to_code,
                               WhereToPoint where_to_point);
  void SerializeWeakReferences();
  void Serialize() {
    SerializeStrongReferences();
    SerializeWeakReferences();
  }

 private:
  virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
  virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
    return false;
  }
};


} }  // namespace v8::internal

#endif  // V8_SERIALIZE_H_