// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SERIALIZE_H_
#define V8_SERIALIZE_H_

#include "hashmap.h"

namespace v8 {
namespace internal {

// A TypeCode is used to distinguish different kinds of external reference.
// Each kind gets its own enum value to make testing for types easy.
enum TypeCode {
  UNCLASSIFIED,        // One-of-a-kind references.
  BUILTIN,
  RUNTIME_FUNCTION,
  IC_UTILITY,
  DEBUG_ADDRESS,
  STATS_COUNTER,
  TOP_ADDRESS,
  C_BUILTIN,
  EXTENSION,
  ACCESSOR,
  RUNTIME_ENTRY,
  STUB_CACHE_TABLE
};

const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
const int kFirstTypeCode = UNCLASSIFIED;

const int kReferenceIdBits = 16;
const int kReferenceIdMask = (1 << kReferenceIdBits) - 1;
const int kReferenceTypeShift = kReferenceIdBits;
const int kDebugRegisterBits = 4;
const int kDebugIdShift = kDebugRegisterBits;
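// A note on the layout (inferred from the constants above and the decoder
// below): an encoded external reference packs a TypeCode and a per-type id
// into one uint32_t, roughly encoding = (type << kReferenceTypeShift) | id.
// For example, the third reference of type STATS_COUNTER would encode as
// (STATS_COUNTER << 16) | 2, and ExternalReferenceDecoder::Lookup() recovers
// the two halves with kReferenceTypeShift and kReferenceIdMask.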


class ExternalReferenceEncoder {
 public:
  ExternalReferenceEncoder();

  uint32_t Encode(Address key) const;

  const char* NameOfAddress(Address key) const;

 private:
  HashMap encodings_;
  static uint32_t Hash(Address key) {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >> 2);
  }

  int IndexOf(Address key) const;

  static bool Match(void* key1, void* key2) { return key1 == key2; }

  void Put(Address key, int index);
};


class ExternalReferenceDecoder {
 public:
  ExternalReferenceDecoder();
  ~ExternalReferenceDecoder();

  Address Decode(uint32_t key) const {
    if (key == 0) return NULL;
    return *Lookup(key);
  }

 private:
  Address** encodings_;

  Address* Lookup(uint32_t key) const {
    int type = key >> kReferenceTypeShift;
    ASSERT(kFirstTypeCode <= type && type < kTypeCodeCount);
    int id = key & kReferenceIdMask;
    return &encodings_[type][id];
  }

  void Put(uint32_t key, Address value) {
    *Lookup(key) = value;
  }
};


class SnapshotByteSource {
 public:
  SnapshotByteSource(const byte* array, int length)
      : data_(array), length_(length), position_(0) { }

  bool HasMore() { return position_ < length_; }

  int Get() {
    ASSERT(position_ < length_);
    return data_[position_++];
  }

  void CopyRaw(byte* to, int number_of_bytes) {
    memcpy(to, data_ + position_, number_of_bytes);
    position_ += number_of_bytes;
  }

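  // GetInt() below decodes the variable-length integer format used by the
  // snapshot (a description inferred from the code, with an illustrative
  // example): each byte carries 7 payload bits, most-significant group first,
  // and every byte except the last has its high bit set.  E.g. 300 (0x12C)
  // is stored as the two bytes 0x82, 0x2C, since (0x02 << 7) | 0x2C == 300.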
  int GetInt() {
    // A little unwind to catch the really small ints.
    int snapshot_byte = Get();
    if ((snapshot_byte & 0x80) == 0) {
      return snapshot_byte;
    }
    int accumulator = (snapshot_byte & 0x7f) << 7;
    while (true) {
      snapshot_byte = Get();
      if ((snapshot_byte & 0x80) == 0) {
        return accumulator | snapshot_byte;
      }
      accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
    }
    UNREACHABLE();
    return accumulator;
  }

  bool AtEOF() {
    return position_ == length_;
  }

  int position() { return position_; }

 private:
  const byte* data_;
  int length_;
  int position_;
};


// Certain references are very common in snapshots, e.g. a reference to the
// object at word 10 in space 2 or the object at word 28 in space 4, so they
// get shortcut encodings of their own.  These shortcuts only work for objects
// in the first page of a space.
#define COMMON_REFERENCE_PATTERNS(f) \
  f(kNumberOfSpaces, 2, 10) \
  f(kNumberOfSpaces + 1, 2, 5) \
  f(kNumberOfSpaces + 2, 4, 28) \
  f(kNumberOfSpaces + 3, 2, 21) \
  f(kNumberOfSpaces + 4, 2, 98) \
  f(kNumberOfSpaces + 5, 2, 67) \
  f(kNumberOfSpaces + 6, 4, 132)
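// Each f(code, space, word offset) entry names one such frequent reference.
// The code values (kNumberOfSpaces + n) presumably extend the per-space codes
// used by REFERENCE_SERIALIZATION below, so one of these references can be
// emitted as a single instruction byte.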

#define COMMON_RAW_LENGTHS(f) \
  f(1, 1) \
  f(2, 2) \
  f(3, 3) \
  f(4, 4) \
  f(5, 5) \
  f(6, 6) \
  f(7, 7) \
  f(8, 8) \
  f(9, 12) \
  f(10, 16) \
  f(11, 20) \
  f(12, 24) \
  f(13, 28) \
  f(14, 32) \
  f(15, 36)
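// In each f(n, length) pair above, n is presumably the instruction byte
// (RAW_DATA_SERIALIZATION + n, see the DataType enum below) and length is the
// number of raw bytes it stands for; lengths above 8 step by 4 bytes.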

// SerializerDeserializer is a common superclass of Serializer and
// Deserializer; it holds the constants and methods shared by both.
class SerializerDeserializer: public ObjectVisitor {
 public:
  static void Iterate(ObjectVisitor* visitor);
  static void SetSnapshotCacheSize(int size);

 protected:
  enum DataType {
    RAW_DATA_SERIALIZATION = 0,
    // And 15 common raw lengths.
    OBJECT_SERIALIZATION = 16,
    // One variant per space.
    CODE_OBJECT_SERIALIZATION = 25,
    // One per space (only code spaces in use).
    EXTERNAL_REFERENCE_SERIALIZATION = 34,
    EXTERNAL_BRANCH_TARGET_SERIALIZATION = 35,
    SYNCHRONIZE = 36,
    START_NEW_PAGE_SERIALIZATION = 37,
    NATIVES_STRING_RESOURCE = 38,
    ROOT_SERIALIZATION = 39,
    PARTIAL_SNAPSHOT_CACHE_ENTRY = 40,
    // Free: 41-47.
    BACKREF_SERIALIZATION = 48,
    // One per space, must be kSpaceMask aligned.
    // Free: 57-63.
    REFERENCE_SERIALIZATION = 64,
    // One per space and common references. Must be kSpaceMask aligned.
    CODE_BACKREF_SERIALIZATION = 80,
    // One per space, must be kSpaceMask aligned.
    // Free: 89-95.
    CODE_REFERENCE_SERIALIZATION = 96
    // One per space, must be kSpaceMask aligned.
    // Free: 105-255.
  };
  static const int kLargeData = LAST_SPACE;
  static const int kLargeCode = kLargeData + 1;
  static const int kLargeFixedArray = kLargeCode + 1;
  static const int kNumberOfSpaces = kLargeFixedArray + 1;

  // A bitmask for getting the space out of an instruction.
  static const int kSpaceMask = 15;
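  // For example (illustrative): a back reference into space 2 is written as
  // the single instruction byte BACKREF_SERIALIZATION + 2, and the reading
  // side recovers the space with (instruction & kSpaceMask).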

  static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
  static inline bool SpaceIsPaged(int space) {
    return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
  }

  static int partial_snapshot_cache_length_;
  static const int kPartialSnapshotCacheCapacity = 1024;
  static Object* partial_snapshot_cache_[];
};


// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
class Deserializer: public SerializerDeserializer {
 public:
  // Create a deserializer from a snapshot byte source.
  explicit Deserializer(SnapshotByteSource* source);

  virtual ~Deserializer();

  // Deserialize the snapshot into an empty heap.
  void Deserialize();

  // Deserialize a single object and the objects reachable from it.
  void DeserializePartial(Object** root);

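  // A minimal usage sketch (illustrative; 'data' and 'length' stand for a raw
  // snapshot blob and are not defined in this header):
  //   SnapshotByteSource source(data, length);
  //   Deserializer deserializer(&source);
  //   deserializer.Deserialize();               // whole snapshot, empty heap
  //   // or: deserializer.DeserializePartial(&root);
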
#ifdef DEBUG
  virtual void Synchronize(const char* tag);
#endif

 private:
  virtual void VisitPointers(Object** start, Object** end);

  virtual void VisitExternalReferences(Address* start, Address* end) {
    UNREACHABLE();
  }

  virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
    UNREACHABLE();
  }

  void ReadChunk(Object** start, Object** end, int space, Address address);
  HeapObject* GetAddressFromStart(int space);
  inline HeapObject* GetAddressFromEnd(int space);
  Address Allocate(int space_number, Space* space, int size);
  void ReadObject(int space_number, Space* space, Object** write_back);

  // Keep track of the pages in the paged spaces.
  // (In large object space we are keeping track of individual objects
  // rather than pages.) In new space we just need the address of the
  // first object and the others will flow from that.
  List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];

  SnapshotByteSource* source_;
  static ExternalReferenceDecoder* external_reference_decoder_;
  // This is the address of the next object that will be allocated in each
  // space. It is used to calculate the addresses of back-references.
  Address high_water_[LAST_SPACE + 1];
  // This is the address of the most recent object that was allocated. It
  // is used to set the location of the new page when we encounter a
  // START_NEW_PAGE_SERIALIZATION tag.
  Address last_object_address_;

  DISALLOW_COPY_AND_ASSIGN(Deserializer);
};


class SnapshotByteSink {
 public:
  virtual ~SnapshotByteSink() { }
  virtual void Put(int byte, const char* description) = 0;
  virtual void PutSection(int byte, const char* description) {
    Put(byte, description);
  }
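  // Writes an integer for SnapshotByteSource::GetInt() to read back --
  // presumably the same 7-bits-per-byte format described above; only the
  // declaration appears in this header.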
  void PutInt(uintptr_t integer, const char* description);
  virtual int Position() = 0;
};


// Maps objects to their location after deserialization.
// This is used during snapshot building, but not at runtime by V8.
class SerializationAddressMapper {
 public:
  SerializationAddressMapper()
      : serialization_map_(new HashMap(&SerializationMatchFun)),
        no_allocation_(new AssertNoAllocation()) { }

  ~SerializationAddressMapper() {
    delete serialization_map_;
    delete no_allocation_;
  }

  bool IsMapped(HeapObject* obj) {
    return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
  }

  int MappedTo(HeapObject* obj) {
    ASSERT(IsMapped(obj));
    return static_cast<int>(reinterpret_cast<intptr_t>(
        serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
  }

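  // Typical use (an illustrative sketch, not mandated by this header): the
  // serializer calls AddMapping(obj, offset) the first time it emits an
  // object, and MappedTo(obj) to produce a back reference when it meets the
  // same object again.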
  void AddMapping(HeapObject* obj, int to) {
    ASSERT(!IsMapped(obj));
    HashMap::Entry* entry =
        serialization_map_->Lookup(Key(obj), Hash(obj), true);
    entry->value = Value(to);
  }

 private:
  static bool SerializationMatchFun(void* key1, void* key2) {
    return key1 == key2;
  }

  static uint32_t Hash(HeapObject* obj) {
    return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
  }

  static void* Key(HeapObject* obj) {
    return reinterpret_cast<void*>(obj->address());
  }

  static void* Value(int v) {
    return reinterpret_cast<void*>(v);
  }

  HashMap* serialization_map_;
  AssertNoAllocation* no_allocation_;
  DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
};


class Serializer : public SerializerDeserializer {
 public:
  explicit Serializer(SnapshotByteSink* sink);
  void VisitPointers(Object** start, Object** end);
  // You can call this after serialization to find out how much space was used
  // in each space.
  int CurrentAllocationAddress(int space) {
    if (SpaceIsLarge(space)) return large_object_total_;
    return fullness_[space];
  }

  static void Enable() {
    if (!serialization_enabled_) {
      ASSERT(!too_late_to_enable_now_);
    }
    serialization_enabled_ = true;
  }

  static void Disable() { serialization_enabled_ = false; }
  // Call this when you have made use of the fact that there is no
  // serialization going on.
  static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
  static bool enabled() { return serialization_enabled_; }
  SerializationAddressMapper* address_mapper() { return &address_mapper_; }
#ifdef DEBUG
  virtual void Synchronize(const char* tag);
#endif

 protected:
  enum ReferenceRepresentation {
    TAGGED_REPRESENTATION,       // A tagged object reference.
    CODE_TARGET_REPRESENTATION   // A reference to first instruction in target.
  };
  static const int kInvalidRootIndex = -1;
  virtual int RootIndex(HeapObject* heap_object) = 0;
  virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;

  class ObjectSerializer : public ObjectVisitor {
   public:
    ObjectSerializer(Serializer* serializer,
                     Object* o,
                     SnapshotByteSink* sink,
                     ReferenceRepresentation representation)
        : serializer_(serializer),
          object_(HeapObject::cast(o)),
          sink_(sink),
          reference_representation_(representation),
          bytes_processed_so_far_(0) { }
    void Serialize();
    void VisitPointers(Object** start, Object** end);
    void VisitExternalReferences(Address* start, Address* end);
    void VisitCodeTarget(RelocInfo* target);
    void VisitRuntimeEntry(RelocInfo* reloc);
    // Used for serializing the external strings that hold the natives source.
    void VisitExternalAsciiString(
        v8::String::ExternalAsciiStringResource** resource);
    // We can't serialize a heap with external two byte strings.
    void VisitExternalTwoByteString(
        v8::String::ExternalStringResource** resource) {
      UNREACHABLE();
    }

   private:
    void OutputRawData(Address up_to);

    Serializer* serializer_;
    HeapObject* object_;
    SnapshotByteSink* sink_;
    ReferenceRepresentation reference_representation_;
    int bytes_processed_so_far_;
  };

  virtual void SerializeObject(Object* o,
                               ReferenceRepresentation representation) = 0;
  void SerializeReferenceToPreviousObject(
      int space,
      int address,
      ReferenceRepresentation reference_representation);
  void InitializeAllocators();
  // This will return the space for an object. If the object is in large
  // object space it may return kLargeCode or kLargeFixedArray in order
  // to indicate to the deserializer what kind of large object allocation
  // to make.
  static int SpaceOfObject(HeapObject* object);
  // This just returns the space of the object. It will return LO_SPACE
  // for all large objects since you can't check the type of the object
  // once the map has been used for the serialization address.
  static int SpaceOfAlreadySerializedObject(HeapObject* object);
  int Allocate(int space, int size, bool* new_page_started);
  int EncodeExternalReference(Address addr) {
    return external_reference_encoder_->Encode(addr);
  }

  // Keep track of the fullness of each space in order to generate
  // relative addresses for back references. Large objects are
  // just numbered sequentially since relative addresses make no
  // sense in large object space.
  int fullness_[LAST_SPACE + 1];
  SnapshotByteSink* sink_;
  int current_root_index_;
  ExternalReferenceEncoder* external_reference_encoder_;
  static bool serialization_enabled_;
  // Did we already make use of the fact that serialization was not enabled?
  static bool too_late_to_enable_now_;
  int large_object_total_;
  SerializationAddressMapper address_mapper_;

  friend class ObjectSerializer;
  friend class Deserializer;

  DISALLOW_COPY_AND_ASSIGN(Serializer);
};


class PartialSerializer : public Serializer {
 public:
  PartialSerializer(Serializer* startup_snapshot_serializer,
                    SnapshotByteSink* sink)
      : Serializer(sink),
        startup_serializer_(startup_snapshot_serializer) {
  }

  // Serialize the objects reachable from a single object pointer.
  virtual void Serialize(Object** o);
  virtual void SerializeObject(Object* o,
                               ReferenceRepresentation representation);

 protected:
  virtual int RootIndex(HeapObject* o);
  virtual int PartialSnapshotCacheIndex(HeapObject* o);
  virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
    return o->IsString() || o->IsSharedFunctionInfo();
  }

 private:
  Serializer* startup_serializer_;
  DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};


class StartupSerializer : public Serializer {
 public:
  explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
    // Clear the cache of objects used by the partial snapshot. After the
    // strong roots have been serialized we can create a partial snapshot
    // which will repopulate the cache with objects needed by that partial
    // snapshot.
    partial_snapshot_cache_length_ = 0;
  }
  // Serialize the current state of the heap. The order is:
  // 1) Strong references.
  // 2) Partial snapshot cache.
  // 3) Weak references (e.g. the symbol table).
  virtual void SerializeStrongReferences();
  virtual void SerializeObject(Object* o,
                               ReferenceRepresentation representation);
  void SerializeWeakReferences();
  void Serialize() {
    SerializeStrongReferences();
    SerializeWeakReferences();
  }

 private:
  virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
  virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
    return false;
  }
};

} }  // namespace v8::internal

#endif  // V8_SERIALIZE_H_