// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/deserializer.h"

#include "src/bootstrapper.h"
#include "src/external-reference-table.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/snapshot/natives.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

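// Decode the serialized reservations into per-space chunk lists, in the
// fixed space order the serializer emitted them in (starting at NEW_SPACE).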
void Deserializer::DecodeReservation(
    Vector<const SerializedData::Reservation> res) {
  DCHECK_EQ(0, reservations_[NEW_SPACE].length());
  STATIC_ASSERT(NEW_SPACE == 0);
  int current_space = NEW_SPACE;
  for (auto& r : res) {
    reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}

void Deserializer::FlushICacheForNewIsolate() {
  DCHECK(!deserializing_user_code_);
  // The entire isolate is newly deserialized. Simply flush all code pages.
  for (Page* p : *isolate_->heap()->code_space()) {
    Assembler::FlushICache(isolate_, p->area_start(),
                           p->area_end() - p->area_start());
  }
}

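// Flush the instruction cache for each code object deserialized from a code
// cache, aging the code first if FLAG_serialize_age_code is set.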
void Deserializer::FlushICacheForNewCodeObjects() {
  DCHECK(deserializing_user_code_);
  for (Code* code : new_code_objects_) {
    if (FLAG_serialize_age_code) code->PreAge(isolate_);
    Assembler::FlushICache(isolate_, code->instruction_start(),
                           code->instruction_size());
  }
}

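// Ask the heap to reserve all decoded chunks up front, then point each
// preallocated space's allocation pointer at its first chunk.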
bool Deserializer::ReserveSpace() {
#ifdef DEBUG
  for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
    CHECK(reservations_[i].length() > 0);
  }
#endif  // DEBUG
  if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}

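// Bind this deserializer to an isolate and verify that the snapshot was
// built against a matching external reference table.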
void Deserializer::Initialize(Isolate* isolate) {
  DCHECK_NULL(isolate_);
  DCHECK_NOT_NULL(isolate);
  isolate_ = isolate;
  DCHECK_NULL(external_reference_table_);
  external_reference_table_ = ExternalReferenceTable::instance(isolate);
  CHECK_EQ(magic_number_,
           SerializedData::ComputeMagicNumber(external_reference_table_));
}

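// Deserialize the complete startup snapshot into a freshly created isolate.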
void Deserializer::Deserialize(Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
  // No active threads.
  DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
  // No active handles.
  DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
  // Partial snapshot cache is not yet populated.
  DCHECK(isolate_->partial_snapshot_cache()->is_empty());

  {
    DisallowHeapAllocation no_gc;
    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
    isolate_->heap()->IterateSmiRoots(this);
    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
    isolate_->heap()->RepairFreeListsAfterDeserialization();
    isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
    DeserializeDeferredObjects();
    FlushICacheForNewIsolate();
  }

  isolate_->heap()->set_native_contexts_list(
      isolate_->heap()->undefined_value());
  // The allocation site list is built during root iteration, but if no sites
  // were encountered then it needs to be initialized to undefined.
  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
    isolate_->heap()->set_allocation_sites_list(
        isolate_->heap()->undefined_value());
  }

  // Issue code events for newly deserialized code objects.
  LOG_CODE_EVENT(isolate_, LogCodeObjects());
  LOG_CODE_EVENT(isolate_, LogBytecodeHandlers());
  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
}

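// Deserialize a partial (context) snapshot. The global proxy is registered
// as an attached object so the serialized context can reference it.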
MaybeHandle<Object> Deserializer::DeserializePartial(
    Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    V8::FatalProcessOutOfMemory("deserialize context");
    return MaybeHandle<Object>();
  }

  AddAttachedObject(global_proxy);

  DisallowHeapAllocation no_gc;
  // Keep track of the code space start and end pointers in case new
  // code objects were deserialized.
  OldSpace* code_space = isolate_->heap()->code_space();
  Address start_address = code_space->top();
  Object* root;
  VisitPointer(&root);
  DeserializeDeferredObjects();

  isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);

  // No code objects are deserialized here. If this assert fires, that has
  // changed and logging should be added to notify the profiler et al. of the
  // new code, which also has to be flushed from the instruction cache.
  CHECK_EQ(start_address, code_space->top());
  return Handle<Object>(root, isolate);
}

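// Deserialize user code from the code cache, yielding its SharedFunctionInfo.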
MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
    Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    return Handle<SharedFunctionInfo>();
  } else {
    deserializing_user_code_ = true;
    HandleScope scope(isolate);
    Handle<SharedFunctionInfo> result;
    {
      DisallowHeapAllocation no_gc;
      Object* root;
      VisitPointer(&root);
      DeserializeDeferredObjects();
      FlushICacheForNewCodeObjects();
      result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
      isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
    }
    CommitPostProcessedObjects(isolate);
    return scope.CloseAndEscape(result);
  }
}

Deserializer::~Deserializer() {
  // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
  // DCHECK(source_.AtEOF());
}

// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
void Deserializer::VisitPointers(Object** start, Object** end) {
  // The space must be new space. Any other space would cause ReadChunk to try
  // to update the remembered set using NULL as the address.
  ReadData(start, end, NEW_SPACE, NULL);
}

void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
  static const byte expected = kSynchronize;
  CHECK_EQ(expected, source_.Get());
}

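// Fill in the bodies of objects whose contents were deferred during the main
// pass, until the stream signals kSynchronize.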
void Deserializer::DeserializeDeferredObjects() {
  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
    switch (code) {
      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2:
        SetAlignment(code);
        break;
      default: {
        int space = code & kSpaceMask;
        DCHECK(space <= kNumberOfSpaces);
        DCHECK(code - space == kNewObject);
        HeapObject* object = GetBackReferencedObject(space);
        int size = source_.GetInt() << kPointerSizeLog2;
        Address obj_address = object->address();
        Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
        Object** end = reinterpret_cast<Object**>(obj_address + size);
        bool filled = ReadData(start, end, space, obj_address);
        CHECK(filled);
        DCHECK(CanBeDeferred(object));
        PostProcessNewObject(object, space);
      }
    }
  }
}

// Used to insert a deserialized internalized string into the string table.
class StringTableInsertionKey : public HashTableKey {
 public:
  explicit StringTableInsertionKey(String* string)
      : string_(string), hash_(HashForObject(string)) {
    DCHECK(string->IsInternalizedString());
  }

  bool IsMatch(Object* string) override {
    // We know that all entries in a hash table had their hash keys created.
    // Use that knowledge to have fast failure.
    if (hash_ != HashForObject(string)) return false;
    // We want to compare the content of two internalized strings here.
    return string_->SlowEquals(String::cast(string));
  }

  uint32_t Hash() override { return hash_; }

  uint32_t HashForObject(Object* key) override {
    return String::cast(key)->Hash();
  }

  MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
    return handle(string_, isolate);
  }

 private:
  String* string_;
  uint32_t hash_;
  DisallowHeapAllocation no_gc;
};

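// Fix up a freshly deserialized object: canonicalize internalized strings and
// record new scripts when deserializing user code, link allocation sites into
// the heap's list, and remember new code objects.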
HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
  if (deserializing_user_code()) {
    if (obj->IsString()) {
      String* string = String::cast(obj);
      // Uninitialize hash field as the hash seed may have changed.
      string->set_hash_field(String::kEmptyHashField);
      if (string->IsInternalizedString()) {
        // Canonicalize the internalized string. If it already exists in the
        // string table, set it to forward to the existing one.
        StringTableInsertionKey key(string);
        String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
        if (canonical == NULL) {
          new_internalized_strings_.Add(handle(string));
          return string;
        } else {
          string->SetForwardedInternalizedString(canonical);
          return canonical;
        }
      }
    } else if (obj->IsScript()) {
      new_scripts_.Add(handle(Script::cast(obj)));
    } else {
      DCHECK(CanBeDeferred(obj));
    }
  }
  if (obj->IsAllocationSite()) {
    // Allocation sites are present in the snapshot, and must be linked into
    // a list at deserialization time.
    AllocationSite* site = AllocationSite::cast(obj);
    // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
    // as a (weak) root. If this root is relocated correctly, this becomes
    // unnecessary.
    if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
      site->set_weak_next(isolate_->heap()->undefined_value());
    } else {
      site->set_weak_next(isolate_->heap()->allocation_sites_list());
    }
    isolate_->heap()->set_allocation_sites_list(site);
  } else if (obj->IsCode()) {
    // We flush all code pages after deserializing the startup snapshot. In
    // that case, we only need to remember code objects in the large object
    // space. When deserializing user code, remember each individual code
    // object.
    if (deserializing_user_code() || space == LO_SPACE) {
      new_code_objects_.Add(Code::cast(obj));
    }
  }
  // Check alignment.
  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
  return obj;
}

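// Runs once allocation is allowed again: insert newly internalized strings
// into the string table and register new scripts under fresh script ids.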
void Deserializer::CommitPostProcessedObjects(Isolate* isolate) {
  StringTable::EnsureCapacityForDeserialization(
      isolate, new_internalized_strings_.length());
  for (Handle<String> string : new_internalized_strings_) {
    StringTableInsertionKey key(*string);
    DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
    StringTable::LookupKey(isolate, &key);
  }

  Heap* heap = isolate->heap();
  Factory* factory = isolate->factory();
  for (Handle<Script> script : new_scripts_) {
    // Assign a new script id to avoid collision.
    script->set_id(isolate_->heap()->NextScriptId());
    // Add script to list.
    Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script);
    heap->SetRootScriptList(*list);
  }
}

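// Resolve a back reference from the stream: large objects are referenced by
// index, objects in all other spaces by chunk index and offset.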
HeapObject* Deserializer::GetBackReferencedObject(int space) {
  HeapObject* obj;
  SerializerReference back_reference =
      SerializerReference::FromBitfield(source_.GetInt());
  if (space == LO_SPACE) {
    CHECK(back_reference.chunk_index() == 0);
    uint32_t index = back_reference.large_object_index();
    obj = deserialized_large_objects_[index];
  } else {
    DCHECK(space < kNumberOfPreallocatedSpaces);
    uint32_t chunk_index = back_reference.chunk_index();
    DCHECK_LE(chunk_index, current_chunk_[space]);
    uint32_t chunk_offset = back_reference.chunk_offset();
    Address address = reservations_[space][chunk_index].start + chunk_offset;
    if (next_alignment_ != kWordAligned) {
      int padding = Heap::GetFillToAlign(address, next_alignment_);
      next_alignment_ = kWordAligned;
      DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
      address += padding;
    }
    obj = HeapObject::FromAddress(address);
  }
  if (deserializing_user_code() && obj->IsInternalizedString()) {
    obj = String::cast(obj)->GetForwardedInternalizedString();
  }
  hot_objects_.Add(obj);
  return obj;
}

// This routine writes the new object into the pointer provided. The object is
// written up front rather than after its body has been read; otherwise it
// would be written very late, which means the FreeSpace map is not set up by
// the time we need to use it to mark the space at the end of a page free.
void Deserializer::ReadObject(int space_number, Object** write_back) {
  Address address;
  HeapObject* obj;
  int size = source_.GetInt() << kObjectAlignmentBits;

  if (next_alignment_ != kWordAligned) {
    int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
    address = Allocate(space_number, reserved);
    obj = HeapObject::FromAddress(address);
    // If one of the following assertions fails, then we are deserializing an
    // aligned object when the filler maps have not been deserialized yet.
    // We require filler maps as padding to align the object.
    Heap* heap = isolate_->heap();
    DCHECK(heap->free_space_map()->IsMap());
    DCHECK(heap->one_pointer_filler_map()->IsMap());
    DCHECK(heap->two_pointer_filler_map()->IsMap());
    obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
    address = obj->address();
    next_alignment_ = kWordAligned;
  } else {
    address = Allocate(space_number, size);
    obj = HeapObject::FromAddress(address);
  }

  isolate_->heap()->OnAllocationEvent(obj, size);
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);

  if (ReadData(current, limit, space_number, address)) {
    // Only post process if object content has not been deferred.
    obj = PostProcessNewObject(obj, space_number);
  }

  Object* write_back_obj = obj;
  UnalignedCopy(write_back, &write_back_obj);
#ifdef DEBUG
  if (obj->IsCode()) {
    DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
  } else {
    DCHECK(space_number != CODE_SPACE);
  }
#endif  // DEBUG
}

// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// We may have to split up the pre-allocation into several chunks
// because it would not fit onto a single page. We do not have to keep
// track of when to move to the next chunk. An opcode will signal this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
Address Deserializer::Allocate(int space_index, int size) {
  if (space_index == LO_SPACE) {
    AlwaysAllocateScope scope(isolate_);
    LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
    Executability exec = static_cast<Executability>(source_.Get());
    AllocationResult result = lo_space->AllocateRaw(size, exec);
    HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
    deserialized_large_objects_.Add(obj);
    return obj->address();
  } else {
    DCHECK(space_index < kNumberOfPreallocatedSpaces);
    Address address = high_water_[space_index];
    DCHECK_NOT_NULL(address);
    high_water_[space_index] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
    const Heap::Reservation& reservation = reservations_[space_index];
    int chunk_index = current_chunk_[space_index];
    CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
#endif
    if (space_index == CODE_SPACE) SkipList::Update(address, size);
    return address;
  }
}

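// Create an external string resource for a natives source and write a
// pointer to it into the current slot.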
Object** Deserializer::CopyInNativesSource(Vector<const char> source_vector,
                                           Object** current) {
  DCHECK(!isolate_->heap()->deserialization_complete());
  NativesExternalStringResource* resource = new NativesExternalStringResource(
      source_vector.start(), source_vector.length());
  Object* resource_obj = reinterpret_cast<Object*>(resource);
  UnalignedCopy(current++, &resource_obj);
  return current;
}

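// The core dispatch loop: read bytecodes from the stream and materialize
// objects and pointers into [current, limit). Returns false if the object's
// body has been deferred, true once the range has been filled.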
bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
                            Address current_object_address) {
  Isolate* const isolate = isolate_;
  // Write barrier support costs around 1% in startup time. In fact there
  // are no new space objects in current boot snapshots, so it's not needed,
  // but that may change.
  bool write_barrier_needed =
      (current_object_address != NULL && source_space != NEW_SPACE &&
       source_space != CODE_SPACE);
  while (current < limit) {
    byte data = source_.Get();
    switch (data) {
#define CASE_STATEMENT(where, how, within, space_number) \
  case where + how + within + space_number:              \
    STATIC_ASSERT((where & ~kWhereMask) == 0);           \
    STATIC_ASSERT((how & ~kHowToCodeMask) == 0);         \
    STATIC_ASSERT((within & ~kWhereToPointMask) == 0);   \
    STATIC_ASSERT((space_number & ~kSpaceMask) == 0);

#define CASE_BODY(where, how, within, space_number_if_any)                     \
  {                                                                            \
    bool emit_write_barrier = false;                                           \
    bool current_was_incremented = false;                                      \
    int space_number = space_number_if_any == kAnyOldSpace                     \
                           ? (data & kSpaceMask)                               \
                           : space_number_if_any;                              \
    if (where == kNewObject && how == kPlain && within == kStartOfObject) {    \
      ReadObject(space_number, current);                                       \
      emit_write_barrier = (space_number == NEW_SPACE);                        \
    } else {                                                                   \
      Object* new_object = NULL; /* May not be a real Object pointer. */       \
      if (where == kNewObject) {                                               \
        ReadObject(space_number, &new_object);                                 \
      } else if (where == kBackref) {                                          \
        emit_write_barrier = (space_number == NEW_SPACE);                      \
        new_object = GetBackReferencedObject(data & kSpaceMask);               \
      } else if (where == kBackrefWithSkip) {                                  \
        int skip = source_.GetInt();                                           \
        current = reinterpret_cast<Object**>(                                  \
            reinterpret_cast<Address>(current) + skip);                        \
        emit_write_barrier = (space_number == NEW_SPACE);                      \
        new_object = GetBackReferencedObject(data & kSpaceMask);               \
      } else if (where == kRootArray) {                                        \
        int id = source_.GetInt();                                             \
        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
        new_object = isolate->heap()->root(root_index);                        \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
        hot_objects_.Add(HeapObject::cast(new_object));                        \
      } else if (where == kPartialSnapshotCache) {                             \
        int cache_index = source_.GetInt();                                    \
        new_object = isolate->partial_snapshot_cache()->at(cache_index);       \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
      } else if (where == kExternalReference) {                                \
        int skip = source_.GetInt();                                           \
        current = reinterpret_cast<Object**>(                                  \
            reinterpret_cast<Address>(current) + skip);                        \
        int reference_id = source_.GetInt();                                   \
        Address address = external_reference_table_->address(reference_id);    \
        new_object = reinterpret_cast<Object*>(address);                       \
      } else if (where == kAttachedReference) {                                \
        int index = source_.GetInt();                                          \
        new_object = *attached_objects_[index];                                \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
      } else {                                                                 \
        DCHECK(where == kBuiltin);                                             \
        DCHECK(deserializing_user_code());                                     \
        int builtin_id = source_.GetInt();                                     \
        DCHECK_LE(0, builtin_id);                                              \
        DCHECK_LT(builtin_id, Builtins::builtin_count);                        \
        Builtins::Name name = static_cast<Builtins::Name>(builtin_id);         \
        new_object = isolate->builtins()->builtin(name);                       \
        emit_write_barrier = false;                                            \
      }                                                                        \
      if (within == kInnerPointer) {                                           \
        if (new_object->IsCode()) {                                            \
          Code* new_code_object = Code::cast(new_object);                      \
          new_object =                                                         \
              reinterpret_cast<Object*>(new_code_object->instruction_start()); \
        } else {                                                               \
          Cell* cell = Cell::cast(new_object);                                 \
          new_object = reinterpret_cast<Object*>(cell->ValueAddress());        \
        }                                                                      \
      }                                                                        \
      if (how == kFromCode) {                                                  \
        Address location_of_branch_data = reinterpret_cast<Address>(current);  \
        Assembler::deserialization_set_special_target_at(                      \
            isolate, location_of_branch_data,                                  \
            Code::cast(HeapObject::FromAddress(current_object_address)),       \
            reinterpret_cast<Address>(new_object));                            \
        location_of_branch_data += Assembler::kSpecialTargetSize;              \
        current = reinterpret_cast<Object**>(location_of_branch_data);         \
        current_was_incremented = true;                                        \
      } else {                                                                 \
        UnalignedCopy(current, &new_object);                                   \
      }                                                                        \
    }                                                                          \
    if (emit_write_barrier && write_barrier_needed) {                          \
      Address current_address = reinterpret_cast<Address>(current);            \
      SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));      \
      isolate->heap()->RecordWrite(                                            \
          HeapObject::FromAddress(current_object_address),                     \
          static_cast<int>(current_address - current_object_address),          \
          *reinterpret_cast<Object**>(current_address));                       \
    }                                                                          \
    if (!current_was_incremented) {                                            \
      current++;                                                               \
    }                                                                          \
    break;                                                                     \
  }

// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with fall-through cases
// and one body.
#define ALL_SPACES(where, how, within)           \
  CASE_STATEMENT(where, how, within, NEW_SPACE)  \
  CASE_BODY(where, how, within, NEW_SPACE)       \
  CASE_STATEMENT(where, how, within, OLD_SPACE)  \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_STATEMENT(where, how, within, MAP_SPACE)  \
  CASE_STATEMENT(where, how, within, LO_SPACE)   \
  CASE_BODY(where, how, within, kAnyOldSpace)

#define FOUR_CASES(byte_code) \
  case byte_code:             \
  case byte_code + 1:         \
  case byte_code + 2:         \
  case byte_code + 3:

#define SIXTEEN_CASES(byte_code) \
  FOUR_CASES(byte_code)          \
  FOUR_CASES(byte_code + 4)      \
  FOUR_CASES(byte_code + 8)      \
  FOUR_CASES(byte_code + 12)

#define SINGLE_CASE(where, how, within, space) \
  CASE_STATEMENT(where, how, within, space)    \
  CASE_BODY(where, how, within, space)

      // Deserialize a new object and write a pointer to it to the current
      // object.
      ALL_SPACES(kNewObject, kPlain, kStartOfObject)
      // Support for direct instruction pointers in functions. It's an inner
      // pointer because it points at the entry point, not at the start of the
      // code object.
      SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
      // Support for pointers into a cell. It's an inner pointer because it
      // points directly at the value field, not the start of the cell object.
      SINGLE_CASE(kNewObject, kPlain, kInnerPointer, OLD_SPACE)
      // Deserialize a new code object and write a pointer to its first
      // instruction to the current code object.
      ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
      // Find a recently deserialized object using its offset from the current
      // allocation point and write a pointer to it to the current object.
      ALL_SPACES(kBackref, kPlain, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
#if V8_CODE_EMBEDS_OBJECT_POINTER
      // Deserialize a new object from pointer found in code and write
      // a pointer to it to the current object. Required only for MIPS, PPC,
      // ARM or S390 with embedded constant pool, and omitted on the other
      // architectures because it is fully unrolled and would cause bloat.
      ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to it to the current
      // object. Required only for MIPS, PPC, ARM or S390 with embedded
      // constant pool.
      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to its first instruction
      // to the current code object or the instruction pointer in a function
      // object.
      ALL_SPACES(kBackref, kFromCode, kInnerPointer)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
      // Support for direct instruction pointers in functions.
      SINGLE_CASE(kBackref, kPlain, kInnerPointer, CODE_SPACE)
      SINGLE_CASE(kBackrefWithSkip, kPlain, kInnerPointer, CODE_SPACE)
      // Support for pointers into a cell.
      SINGLE_CASE(kBackref, kPlain, kInnerPointer, OLD_SPACE)
      SINGLE_CASE(kBackrefWithSkip, kPlain, kInnerPointer, OLD_SPACE)
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
#if V8_CODE_EMBEDS_OBJECT_POINTER
      // Find an object in the roots array and write a pointer to it in code.
      SINGLE_CASE(kRootArray, kFromCode, kStartOfObject, 0)
#endif
      // Find an object in the partial snapshot cache and write a pointer to
      // it to the current object.
      SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
      // Find a code entry in the partial snapshot cache and write a pointer
      // to it to the current object.
      SINGLE_CASE(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
      // Find an external reference and write a pointer to it to the current
      // object.
      SINGLE_CASE(kExternalReference, kPlain, kStartOfObject, 0)
      // Find an external reference and write a pointer to it in the current
      // code object.
      SINGLE_CASE(kExternalReference, kFromCode, kStartOfObject, 0)
      // Find an object in the attached references and write a pointer to it
      // to the current object.
      SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
      SINGLE_CASE(kAttachedReference, kPlain, kInnerPointer, 0)
      SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
      // Find a builtin and write a pointer to it to the current object.
      SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
      SINGLE_CASE(kBuiltin, kPlain, kInnerPointer, 0)
      SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)

#undef CASE_STATEMENT
#undef CASE_BODY
#undef ALL_SPACES

      case kSkip: {
        int size = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + size);
        break;
      }

      case kInternalReferenceEncoded:
      case kInternalReference: {
        // Internal reference address is not encoded via skip, but by offset
        // from code entry.
        int pc_offset = source_.GetInt();
        int target_offset = source_.GetInt();
        Code* code =
            Code::cast(HeapObject::FromAddress(current_object_address));
        DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size());
        DCHECK(0 <= target_offset && target_offset <= code->instruction_size());
        Address pc = code->entry() + pc_offset;
        Address target = code->entry() + target_offset;
        Assembler::deserialization_set_target_internal_reference_at(
            isolate, pc, target, data == kInternalReference
                                     ? RelocInfo::INTERNAL_REFERENCE
                                     : RelocInfo::INTERNAL_REFERENCE_ENCODED);
        break;
      }

      case kNop:
        break;

      case kNextChunk: {
        int space = source_.Get();
        DCHECK(space < kNumberOfPreallocatedSpaces);
        int chunk_index = current_chunk_[space];
        const Heap::Reservation& reservation = reservations_[space];
        // Make sure the current chunk is indeed exhausted.
        CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
        // Move to next reserved chunk.
        chunk_index = ++current_chunk_[space];
        CHECK_LT(chunk_index, reservation.length());
        high_water_[space] = reservation[chunk_index].start;
        break;
      }

      case kDeferred: {
        // Deferred can only occur right after the heap object header.
        DCHECK(current == reinterpret_cast<Object**>(current_object_address +
                                                     kPointerSize));
        HeapObject* obj = HeapObject::FromAddress(current_object_address);
        // If the deferred object is a map, its instance type may be used
        // during deserialization. Initialize it with a temporary value.
        if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
        current = limit;
        return false;
      }

      case kSynchronize:
        // If we get here, there is a mismatch between the number of GC roots
        // when serializing and when deserializing.
        CHECK(false);
        break;

      case kNativesStringResource:
        current = CopyInNativesSource(Natives::GetScriptSource(source_.Get()),
                                      current);
        break;

      case kExtraNativesStringResource:
        current = CopyInNativesSource(
            ExtraNatives::GetScriptSource(source_.Get()), current);
        break;

      // Deserialize raw data of variable length.
      case kVariableRawData: {
        int size_in_bytes = source_.GetInt();
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        source_.CopyRaw(raw_data_out, size_in_bytes);
        break;
      }

      case kVariableRepeat: {
        int repeats = source_.GetInt();
        Object* object = current[-1];
        DCHECK(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
        break;
      }

      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2:
        SetAlignment(data);
        break;

      STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
      STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
      SIXTEEN_CASES(kRootArrayConstantsWithSkip)
      SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
        int skip = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + skip);
        // Fall through.
      }

      SIXTEEN_CASES(kRootArrayConstants)
      SIXTEEN_CASES(kRootArrayConstants + 16) {
        int id = data & kRootArrayConstantsMask;
        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
        Object* object = isolate->heap()->root(root_index);
        DCHECK(!isolate->heap()->InNewSpace(object));
        UnalignedCopy(current++, &object);
        break;
      }

      STATIC_ASSERT(kNumberOfHotObjects == 8);
      FOUR_CASES(kHotObjectWithSkip)
      FOUR_CASES(kHotObjectWithSkip + 4) {
        int skip = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<Address>(current) + skip);
        // Fall through.
      }

      FOUR_CASES(kHotObject)
      FOUR_CASES(kHotObject + 4) {
        int index = data & kHotObjectMask;
        Object* hot_object = hot_objects_.Get(index);
        UnalignedCopy(current, &hot_object);
        if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
          Address current_address = reinterpret_cast<Address>(current);
          isolate->heap()->RecordWrite(
              HeapObject::FromAddress(current_object_address),
              static_cast<int>(current_address - current_object_address),
              hot_object);
        }
        current++;
        break;
      }

      // Deserialize raw data of fixed length from 1 to 32 words.
      STATIC_ASSERT(kNumberOfFixedRawData == 32);
      SIXTEEN_CASES(kFixedRawData)
      SIXTEEN_CASES(kFixedRawData + 16) {
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
        source_.CopyRaw(raw_data_out, size_in_bytes);
        current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes);
        break;
      }

      STATIC_ASSERT(kNumberOfFixedRepeat == 16);
      SIXTEEN_CASES(kFixedRepeat) {
        int repeats = data - kFixedRepeatStart;
        Object* object;
        UnalignedCopy(&object, current - 1);
        DCHECK(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
        break;
      }

#undef SIXTEEN_CASES
#undef FOUR_CASES
#undef SINGLE_CASE

      default:
        CHECK(false);
    }
  }
  CHECK_EQ(limit, current);
  return true;
}
}  // namespace internal
}  // namespace v8