Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1 | // Copyright 2016 the V8 project authors. All rights reserved. |
| 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
| 4 | |
| 5 | #ifndef V8_SNAPSHOT_SERIALIZER_H_ |
| 6 | #define V8_SNAPSHOT_SERIALIZER_H_ |
| 7 | |
| 8 | #include "src/isolate.h" |
| 9 | #include "src/log.h" |
| 10 | #include "src/objects.h" |
| 11 | #include "src/snapshot/serializer-common.h" |
| 12 | #include "src/snapshot/snapshot-source-sink.h" |
| 13 | |
| 14 | namespace v8 { |
| 15 | namespace internal { |
| 16 | |
// Tracks the names given to code objects as they are logged, keyed by code
// address, and follows code moves so the serializer can later look a name up
// by address. Registers itself as a code event listener for its lifetime.
class CodeAddressMap : public CodeEventLogger {
 public:
  // Starts listening for code events on |isolate|'s logger.
  explicit CodeAddressMap(Isolate* isolate) : isolate_(isolate) {
    isolate->logger()->addCodeEventListener(this);
  }

  // Unregisters from the logger it subscribed to in the constructor.
  ~CodeAddressMap() override {
    isolate_->logger()->removeCodeEventListener(this);
  }

  // Keeps the recorded name attached to a code object when it is moved.
  void CodeMoveEvent(AbstractCode* from, Address to) override {
    address_to_name_map_.Move(from->address(), to);
  }

  // Disable-optimization events do not affect the address->name mapping.
  void CodeDisableOptEvent(AbstractCode* code,
                           SharedFunctionInfo* shared) override {}

  // Returns the name recorded for |address|, or NULL if none was recorded.
  const char* Lookup(Address address) {
    return address_to_name_map_.Lookup(address);
  }

 private:
  // Hash map from code address to a heap-allocated, NUL-terminated name.
  // Owns the name strings: they are freed in Remove() and the destructor.
  class NameMap {
   public:
    NameMap() : impl_(base::HashMap::PointersMatch) {}

    ~NameMap() {
      // Free every owned name string before the table goes away.
      for (base::HashMap::Entry* p = impl_.Start(); p != NULL;
           p = impl_.Next(p)) {
        DeleteArray(static_cast<const char*>(p->value));
      }
    }

    // Records |name| for |code_address|. The first recorded name wins;
    // subsequent inserts for the same address are ignored.
    void Insert(Address code_address, const char* name, int name_size) {
      base::HashMap::Entry* entry = FindOrCreateEntry(code_address);
      if (entry->value == NULL) {
        entry->value = CopyName(name, name_size);
      }
    }

    // Returns the name recorded for |code_address|, or NULL.
    const char* Lookup(Address code_address) {
      base::HashMap::Entry* entry = FindEntry(code_address);
      return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
    }

    // Removes the entry for |code_address| (if present) and frees its name.
    void Remove(Address code_address) {
      base::HashMap::Entry* entry = FindEntry(code_address);
      if (entry != NULL) {
        DeleteArray(static_cast<char*>(entry->value));
        RemoveEntry(entry);
      }
    }

    // Re-keys the name stored at |from| under |to|. The old entry is removed
    // before the new one is created: FindOrCreateEntry may grow the table,
    // which would invalidate |from_entry|.
    void Move(Address from, Address to) {
      if (from == to) return;
      base::HashMap::Entry* from_entry = FindEntry(from);
      DCHECK(from_entry != NULL);
      void* value = from_entry->value;
      RemoveEntry(from_entry);
      base::HashMap::Entry* to_entry = FindOrCreateEntry(to);
      DCHECK(to_entry->value == NULL);
      to_entry->value = value;
    }

   private:
    // Returns a heap-allocated copy of |name|. Embedded NUL characters are
    // replaced with spaces so the copy is a single C string of the full
    // length.
    static char* CopyName(const char* name, int name_size) {
      char* result = NewArray<char>(name_size + 1);
      for (int i = 0; i < name_size; ++i) {
        char c = name[i];
        if (c == '\0') c = ' ';
        result[i] = c;
      }
      result[name_size] = '\0';
      return result;
    }

    base::HashMap::Entry* FindOrCreateEntry(Address code_address) {
      return impl_.LookupOrInsert(code_address,
                                  ComputePointerHash(code_address));
    }

    base::HashMap::Entry* FindEntry(Address code_address) {
      return impl_.Lookup(code_address, ComputePointerHash(code_address));
    }

    void RemoveEntry(base::HashMap::Entry* entry) {
      impl_.Remove(entry->key, entry->hash);
    }

    base::HashMap impl_;

    DISALLOW_COPY_AND_ASSIGN(NameMap);
  };

  // CodeEventLogger hook: records the name of a freshly logged code object.
  void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
                         const char* name, int length) override {
    address_to_name_map_.Insert(code->address(), name, length);
  }

  NameMap address_to_name_map_;
  Isolate* isolate_;
};
| 119 | |
// There can be only one serializer per V8 process.
// Base class for the snapshot serializers. Encodes heap objects into |sink_|
// and tracks previously serialized objects in |reference_map_| so each object
// is emitted only once and later occurrences become back references.
class Serializer : public SerializerDeserializer {
 public:
  explicit Serializer(Isolate* isolate);
  ~Serializer() override;

  // Writes out the per-space memory reservations the deserializer must
  // allocate up front.
  void EncodeReservations(List<SerializedData::Reservation>* out) const;

  // Serializes all objects queued via QueueDeferredObject (objects whose
  // serialization was postponed to bound recursion depth).
  void SerializeDeferredObjects();

  Isolate* isolate() const { return isolate_; }

  SerializerReferenceMap* reference_map() { return &reference_map_; }
  RootIndexMap* root_index_map() { return &root_index_map_; }

#ifdef OBJECT_PRINT
  void CountInstanceType(Map* map, int size);
#endif  // OBJECT_PRINT

 protected:
  class ObjectSerializer;
  // RAII guard that increments the serializer's recursion depth for its
  // lifetime; callers check ExceedsMaximum() to decide when to defer an
  // object instead of recursing further.
  class RecursionScope {
   public:
    explicit RecursionScope(Serializer* serializer) : serializer_(serializer) {
      serializer_->recursion_depth_++;
    }
    ~RecursionScope() { serializer_->recursion_depth_--; }
    bool ExceedsMaximum() {
      return serializer_->recursion_depth_ >= kMaxRecursionDepth;
    }

   private:
    static const int kMaxRecursionDepth = 32;
    Serializer* serializer_;
  };

  // Serializes one heap object; each concrete serializer supplies its own
  // policy here.
  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                               WhereToPoint where_to_point, int skip) = 0;

  void VisitPointers(Object** start, Object** end) override;

  // Emits a reference to an entry of the root list.
  void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
               int skip);

  void PutSmi(Smi* smi);

  // Emits a back reference to an already-serialized copy of |object|.
  void PutBackReference(HeapObject* object, SerializerReference reference);

  void PutAttachedReference(SerializerReference reference,
                            HowToCode how_to_code, WhereToPoint where_to_point);

  // Emit alignment prefix if necessary, return required padding space in bytes.
  int PutAlignmentPrefix(HeapObject* object);

  // Returns true if the object was successfully serialized as hot object.
  bool SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
                          WhereToPoint where_to_point, int skip);

  // Returns true if the object was successfully serialized as back reference.
  bool SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
                              WhereToPoint where_to_point, int skip);

  // Writes a pending skip distance into the sink; no-op when |skip| is zero.
  inline void FlushSkip(int skip) {
    if (skip != 0) {
      sink_.Put(kSkip, "SkipFromSerializeObject");
      sink_.PutInt(skip, "SkipDistanceFromSerializeObject");
    }
  }

  bool BackReferenceIsAlreadyAllocated(SerializerReference back_reference);

  // This will return the space for an object.
  SerializerReference AllocateLargeObject(int size);
  SerializerReference Allocate(AllocationSpace space, int size);
  // Encodes an external address via the external reference table.
  int EncodeExternalReference(Address addr) {
    return external_reference_encoder_.Encode(addr);
  }

  bool HasNotExceededFirstPageOfEachSpace();

  // GetInt reads 4 bytes at once, requiring padding at the end.
  void Pad();

  // We may not need the code address map for logging for every instance
  // of the serializer. Initialize it on demand.
  void InitializeCodeAddressMap();

  Code* CopyCode(Code* code);

  // Largest chunk emitted so far for |space|; bounds-checked accessor over
  // max_chunk_size_.
  inline uint32_t max_chunk_size(int space) const {
    DCHECK_LE(0, space);
    DCHECK_LT(space, kNumberOfSpaces);
    return max_chunk_size_[space];
  }

  const SnapshotByteSink* sink() const { return &sink_; }

  // Queues |obj| for later serialization via SerializeDeferredObjects.
  // The object must already have a back reference recorded for it.
  void QueueDeferredObject(HeapObject* obj) {
    DCHECK(reference_map_.Lookup(obj).is_back_reference());
    deferred_objects_.Add(obj);
  }

  void OutputStatistics(const char* name);

  Isolate* isolate_;

  // Destination byte stream for the serialized snapshot data.
  SnapshotByteSink sink_;
  ExternalReferenceEncoder external_reference_encoder_;

  // Maps already-serialized objects to their serializer references.
  SerializerReferenceMap reference_map_;
  RootIndexMap root_index_map_;

  // Current depth of SerializeObject recursion; managed by RecursionScope.
  int recursion_depth_;

  friend class Deserializer;
  friend class ObjectSerializer;
  friend class RecursionScope;
  friend class SnapshotData;

 private:
  CodeAddressMap* code_address_map_;
  // Objects from the same space are put into chunks for bulk-allocation
  // when deserializing. We have to make sure that each chunk fits into a
  // page. So we track the chunk size in pending_chunk_ of a space, but
  // when it exceeds a page, we complete the current chunk and start a new one.
  uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
  List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
  uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];

  // We map serialized large objects to indexes for back-referencing.
  uint32_t large_objects_total_size_;
  uint32_t seen_large_objects_index_;

  // Scratch buffer used by CopyCode.
  List<byte> code_buffer_;

  // To handle stack overflow.
  List<HeapObject*> deferred_objects_;

#ifdef OBJECT_PRINT
  static const int kInstanceTypes = 256;
  int* instance_type_count_;
  size_t* instance_type_size_;
#endif  // OBJECT_PRINT

  DISALLOW_COPY_AND_ASSIGN(Serializer);
};
| 266 | |
// Serializes a single heap object into |sink|, visiting its fields and
// relocation entries via the ObjectVisitor interface so that everything the
// object references is serialized (or back-referenced) in turn.
class Serializer::ObjectSerializer : public ObjectVisitor {
 public:
  ObjectSerializer(Serializer* serializer, HeapObject* obj,
                   SnapshotByteSink* sink, HowToCode how_to_code,
                   WhereToPoint where_to_point)
      : serializer_(serializer),
        object_(obj),
        sink_(sink),
        // how_to_code and where_to_point are combined into one encoded value.
        reference_representation_(how_to_code + where_to_point),
        bytes_processed_so_far_(0),
        code_has_been_output_(false) {}
  ~ObjectSerializer() override {}
  void Serialize();
  // Serializes an object whose serialization was previously deferred.
  void SerializeDeferred();
  void VisitPointers(Object** start, Object** end) override;
  void VisitEmbeddedPointer(RelocInfo* target) override;
  void VisitExternalReference(Address* p) override;
  void VisitExternalReference(RelocInfo* rinfo) override;
  void VisitInternalReference(RelocInfo* rinfo) override;
  void VisitCodeTarget(RelocInfo* target) override;
  void VisitCodeEntry(Address entry_address) override;
  void VisitCell(RelocInfo* rinfo) override;
  void VisitRuntimeEntry(RelocInfo* reloc) override;
  // Used for serializing the external strings that hold the natives source.
  void VisitExternalOneByteString(
      v8::String::ExternalOneByteStringResource** resource) override;
  // We can't serialize a heap with external two byte strings.
  void VisitExternalTwoByteString(
      v8::String::ExternalStringResource** resource) override {
    UNREACHABLE();
  }

 private:
  void SerializePrologue(AllocationSpace space, int size, Map* map);

  bool SerializeExternalNativeSourceString(
      int builtin_count,
      v8::String::ExternalOneByteStringResource** resource_pointer,
      FixedArray* source_cache, int resource_index);

  enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
  // This function outputs or skips the raw data between the last pointer and
  // up to the current position. It optionally can just return the number of
  // bytes to skip instead of performing a skip instruction, in case the skip
  // can be merged into the next instruction.
  int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
  // External strings are serialized in a way to resemble sequential strings.
  void SerializeExternalString();

  Address PrepareCode();

  Serializer* serializer_;
  HeapObject* object_;
  SnapshotByteSink* sink_;
  // Encoded combination of HowToCode and WhereToPoint (see constructor).
  int reference_representation_;
  // Offset into the object up to which raw data has already been emitted.
  int bytes_processed_so_far_;
  bool code_has_been_output_;
};
| 325 | |
| 326 | } // namespace internal |
| 327 | } // namespace v8 |
| 328 | |
| 329 | #endif // V8_SNAPSHOT_SERIALIZER_H_ |