// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/deserializer.h"

#include "src/bootstrapper.h"
#include "src/external-reference-table.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/snapshot/natives.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

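// Decodes the chunk reservations recorded by the serializer. The stream is
// expected to contain one Reservation per chunk, grouped by space in
// ascending space order, with is_last() marking the final chunk of a space.
// As a rough example, entries {8K}, {4K, last}, {16K, last} would decode to
// two NEW_SPACE chunks followed by a single chunk for the next space.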
void Deserializer::DecodeReservation(
    Vector<const SerializedData::Reservation> res) {
  DCHECK_EQ(0, reservations_[NEW_SPACE].length());
  STATIC_ASSERT(NEW_SPACE == 0);
  int current_space = NEW_SPACE;
  for (auto& r : res) {
    reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}

void Deserializer::FlushICacheForNewIsolate() {
  DCHECK(!deserializing_user_code_);
  // The entire isolate is newly deserialized. Simply flush all code pages.
  PageIterator it(isolate_->heap()->code_space());
  while (it.has_next()) {
    Page* p = it.next();
    Assembler::FlushICache(isolate_, p->area_start(),
                           p->area_end() - p->area_start());
  }
}

void Deserializer::FlushICacheForNewCodeObjects() {
  DCHECK(deserializing_user_code_);
  for (Code* code : new_code_objects_) {
    if (FLAG_serialize_age_code) code->PreAge(isolate_);
    Assembler::FlushICache(isolate_, code->instruction_start(),
                           code->instruction_size());
  }
}

bool Deserializer::ReserveSpace() {
#ifdef DEBUG
  for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
    CHECK(reservations_[i].length() > 0);
  }
#endif  // DEBUG
  if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}

void Deserializer::Initialize(Isolate* isolate) {
  DCHECK_NULL(isolate_);
  DCHECK_NOT_NULL(isolate);
  isolate_ = isolate;
  DCHECK_NULL(external_reference_table_);
  external_reference_table_ = ExternalReferenceTable::instance(isolate);
  CHECK_EQ(magic_number_,
           SerializedData::ComputeMagicNumber(external_reference_table_));
}

void Deserializer::Deserialize(Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
  // No active threads.
  DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
  // No active handles.
  DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
  // Partial snapshot cache is not yet populated.
  DCHECK(isolate_->partial_snapshot_cache()->is_empty());

  {
    DisallowHeapAllocation no_gc;
    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
    isolate_->heap()->IterateSmiRoots(this);
    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
    isolate_->heap()->RepairFreeListsAfterDeserialization();
    isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
    DeserializeDeferredObjects();
    FlushICacheForNewIsolate();
  }

  isolate_->heap()->set_native_contexts_list(
      isolate_->heap()->undefined_value());
  // The allocation site list is built during root iteration, but if no sites
  // were encountered then it needs to be initialized to undefined.
  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
    isolate_->heap()->set_allocation_sites_list(
        isolate_->heap()->undefined_value());
  }

  // Update data pointers to the external strings containing natives sources.
  Natives::UpdateSourceCache(isolate_->heap());
  ExtraNatives::UpdateSourceCache(isolate_->heap());

  // Issue code events for newly deserialized code objects.
  LOG_CODE_EVENT(isolate_, LogCodeObjects());
  LOG_CODE_EVENT(isolate_, LogBytecodeHandlers());
  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
}

MaybeHandle<Object> Deserializer::DeserializePartial(
    Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    V8::FatalProcessOutOfMemory("deserialize context");
    return MaybeHandle<Object>();
  }

  AddAttachedObject(global_proxy);

  DisallowHeapAllocation no_gc;
  // Keep track of the code space start and end pointers in case new
  // code objects were deserialized.
  OldSpace* code_space = isolate_->heap()->code_space();
  Address start_address = code_space->top();
  Object* root;
  VisitPointer(&root);
  DeserializeDeferredObjects();

  isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);

  // There's no code deserialized here. If this assert fires then that's
  // changed and logging should be added to notify the profiler et al of the
  // new code, which also has to be flushed from instruction cache.
  CHECK_EQ(start_address, code_space->top());
  return Handle<Object>(root, isolate);
}

MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
    Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    return Handle<SharedFunctionInfo>();
  } else {
    deserializing_user_code_ = true;
    HandleScope scope(isolate);
    Handle<SharedFunctionInfo> result;
    {
      DisallowHeapAllocation no_gc;
      Object* root;
      VisitPointer(&root);
      DeserializeDeferredObjects();
      FlushICacheForNewCodeObjects();
      result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
      isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
    }
    CommitPostProcessedObjects(isolate);
    return scope.CloseAndEscape(result);
  }
}

Deserializer::~Deserializer() {
  // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
  // DCHECK(source_.AtEOF());
}

// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
void Deserializer::VisitPointers(Object** start, Object** end) {
  // The space must be new space. Any other space would cause ReadChunk to try
  // to update the remembered set using NULL as the address.
  ReadData(start, end, NEW_SPACE, NULL);
}

void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
  static const byte expected = kSynchronize;
  CHECK_EQ(expected, source_.Get());
}

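// Deferred object bodies follow the main stream. Each deferred entry consists
// of an optional alignment prefix, a back reference to the already-allocated
// object, and its size in words; kSynchronize terminates the list (see the
// decoding loop below).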
void Deserializer::DeserializeDeferredObjects() {
  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
    switch (code) {
      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2:
        SetAlignment(code);
        break;
      default: {
        int space = code & kSpaceMask;
        DCHECK(space <= kNumberOfSpaces);
        DCHECK(code - space == kNewObject);
        HeapObject* object = GetBackReferencedObject(space);
        int size = source_.GetInt() << kPointerSizeLog2;
        Address obj_address = object->address();
        Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
        Object** end = reinterpret_cast<Object**>(obj_address + size);
        bool filled = ReadData(start, end, space, obj_address);
        CHECK(filled);
        DCHECK(CanBeDeferred(object));
        PostProcessNewObject(object, space);
      }
    }
  }
}

// Used to insert a deserialized internalized string into the string table.
class StringTableInsertionKey : public HashTableKey {
 public:
  explicit StringTableInsertionKey(String* string)
      : string_(string), hash_(HashForObject(string)) {
    DCHECK(string->IsInternalizedString());
  }

  bool IsMatch(Object* string) override {
    // We know that all entries in a hash table had their hash keys created.
    // Use that knowledge to have fast failure.
    if (hash_ != HashForObject(string)) return false;
    // We want to compare the content of two internalized strings here.
    return string_->SlowEquals(String::cast(string));
  }

  uint32_t Hash() override { return hash_; }

  uint32_t HashForObject(Object* key) override {
    return String::cast(key)->Hash();
  }

  MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
    return handle(string_, isolate);
  }

 private:
  String* string_;
  uint32_t hash_;
  DisallowHeapAllocation no_gc;
};

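// Post-processing fixes up objects that need isolate-specific state after
// their raw contents have been read: user-code strings are re-canonicalized
// against the string table, scripts are collected so fresh ids can be
// assigned later, allocation sites are linked into the heap's list, and new
// code objects are remembered so their instruction cache can be flushed.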
HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
  if (deserializing_user_code()) {
    if (obj->IsString()) {
      String* string = String::cast(obj);
      // Uninitialize hash field as the hash seed may have changed.
      string->set_hash_field(String::kEmptyHashField);
      if (string->IsInternalizedString()) {
        // Canonicalize the internalized string. If it already exists in the
        // string table, set it to forward to the existing one.
        StringTableInsertionKey key(string);
        String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
        if (canonical == NULL) {
          new_internalized_strings_.Add(handle(string));
          return string;
        } else {
          string->SetForwardedInternalizedString(canonical);
          return canonical;
        }
      }
    } else if (obj->IsScript()) {
      new_scripts_.Add(handle(Script::cast(obj)));
    } else {
      DCHECK(CanBeDeferred(obj));
    }
  }
  if (obj->IsAllocationSite()) {
    DCHECK(obj->IsAllocationSite());
    // Allocation sites are present in the snapshot, and must be linked into
    // a list at deserialization time.
    AllocationSite* site = AllocationSite::cast(obj);
    // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
    // as a (weak) root. If this root is relocated correctly, this becomes
    // unnecessary.
    if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
      site->set_weak_next(isolate_->heap()->undefined_value());
    } else {
      site->set_weak_next(isolate_->heap()->allocation_sites_list());
    }
    isolate_->heap()->set_allocation_sites_list(site);
  } else if (obj->IsCode()) {
    // We flush all code pages after deserializing the startup snapshot. In that
    // case, we only need to remember code objects in the large object space.
    // When deserializing user code, remember each individual code object.
    if (deserializing_user_code() || space == LO_SPACE) {
      new_code_objects_.Add(Code::cast(obj));
    }
  }
  // Check alignment.
  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
  return obj;
}

void Deserializer::CommitPostProcessedObjects(Isolate* isolate) {
  StringTable::EnsureCapacityForDeserialization(
      isolate, new_internalized_strings_.length());
  for (Handle<String> string : new_internalized_strings_) {
    StringTableInsertionKey key(*string);
    DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
    StringTable::LookupKey(isolate, &key);
  }

  Heap* heap = isolate->heap();
  Factory* factory = isolate->factory();
  for (Handle<Script> script : new_scripts_) {
    // Assign a new script id to avoid collision.
    script->set_id(isolate_->heap()->NextScriptId());
    // Add script to list.
    Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script);
    heap->SetRootScriptList(*list);
  }
}

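// A back reference is decoded from a single integer (see SerializerReference).
// For the preallocated spaces it encodes a chunk index and an offset into that
// chunk, e.g. chunk_index 1 with chunk_offset 0x40 resolves to
// reservations_[space][1].start + 0x40; for the large object space it is
// simply an index into the list of large objects deserialized so far.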
HeapObject* Deserializer::GetBackReferencedObject(int space) {
  HeapObject* obj;
  SerializerReference back_reference =
      SerializerReference::FromBitfield(source_.GetInt());
  if (space == LO_SPACE) {
    CHECK(back_reference.chunk_index() == 0);
    uint32_t index = back_reference.large_object_index();
    obj = deserialized_large_objects_[index];
  } else {
    DCHECK(space < kNumberOfPreallocatedSpaces);
    uint32_t chunk_index = back_reference.chunk_index();
    DCHECK_LE(chunk_index, current_chunk_[space]);
    uint32_t chunk_offset = back_reference.chunk_offset();
    Address address = reservations_[space][chunk_index].start + chunk_offset;
    if (next_alignment_ != kWordAligned) {
      int padding = Heap::GetFillToAlign(address, next_alignment_);
      next_alignment_ = kWordAligned;
      DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
      address += padding;
    }
    obj = HeapObject::FromAddress(address);
  }
  if (deserializing_user_code() && obj->IsInternalizedString()) {
    obj = String::cast(obj)->GetForwardedInternalizedString();
  }
  hot_objects_.Add(obj);
  return obj;
}

// This routine writes the new object into the pointer provided and then
// returns true if the new object was in young space and false otherwise.
// The reason for this strange interface is that otherwise the object is
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
void Deserializer::ReadObject(int space_number, Object** write_back) {
  Address address;
  HeapObject* obj;
  int size = source_.GetInt() << kObjectAlignmentBits;

  if (next_alignment_ != kWordAligned) {
    int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
    address = Allocate(space_number, reserved);
    obj = HeapObject::FromAddress(address);
    // If one of the following assertions fails, then we are deserializing an
    // aligned object when the filler maps have not been deserialized yet.
    // We require filler maps as padding to align the object.
    Heap* heap = isolate_->heap();
    DCHECK(heap->free_space_map()->IsMap());
    DCHECK(heap->one_pointer_filler_map()->IsMap());
    DCHECK(heap->two_pointer_filler_map()->IsMap());
    obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
    address = obj->address();
    next_alignment_ = kWordAligned;
  } else {
    address = Allocate(space_number, size);
    obj = HeapObject::FromAddress(address);
  }

  isolate_->heap()->OnAllocationEvent(obj, size);
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);

  if (ReadData(current, limit, space_number, address)) {
    // Only post process if object content has not been deferred.
    obj = PostProcessNewObject(obj, space_number);
  }

  Object* write_back_obj = obj;
  UnalignedCopy(write_back, &write_back_obj);
#ifdef DEBUG
  if (obj->IsCode()) {
    DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
  } else {
    DCHECK(space_number != CODE_SPACE);
  }
#endif  // DEBUG
}

// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// We may have to split up the pre-allocation into several chunks
// because it would not fit onto a single page. We do not have to keep
// track of when to move to the next chunk. An opcode will signal this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
Address Deserializer::Allocate(int space_index, int size) {
  if (space_index == LO_SPACE) {
    AlwaysAllocateScope scope(isolate_);
    LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
    Executability exec = static_cast<Executability>(source_.Get());
    AllocationResult result = lo_space->AllocateRaw(size, exec);
    HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
    deserialized_large_objects_.Add(obj);
    return obj->address();
  } else {
    DCHECK(space_index < kNumberOfPreallocatedSpaces);
    Address address = high_water_[space_index];
    DCHECK_NOT_NULL(address);
    high_water_[space_index] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
    const Heap::Reservation& reservation = reservations_[space_index];
    int chunk_index = current_chunk_[space_index];
    CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
#endif
    if (space_index == CODE_SPACE) SkipList::Update(address, size);
    return address;
  }
}

Object** Deserializer::CopyInNativesSource(Vector<const char> source_vector,
                                           Object** current) {
  DCHECK(!isolate_->heap()->deserialization_complete());
  NativesExternalStringResource* resource = new NativesExternalStringResource(
      source_vector.start(), source_vector.length());
  Object* resource_obj = reinterpret_cast<Object*>(resource);
  UnalignedCopy(current++, &resource_obj);
  return current;
}

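// The main decoding loop. Each byte of the stream is either a dedicated
// bytecode (skip, raw data, repeats, hot objects, root constants, ...) or a
// composite opcode built from a "where" (new object, back reference, root
// array, external reference, ...), a "how" (plain or from code), a "where to
// point" (start of object or inner pointer) and a space number, which is what
// the CASE_STATEMENT/CASE_BODY macros below decode.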
bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
                            Address current_object_address) {
  Isolate* const isolate = isolate_;
  // Write barrier support costs around 1% in startup time. In fact there
  // are no new space objects in current boot snapshots, so it's not needed,
  // but that may change.
  bool write_barrier_needed =
      (current_object_address != NULL && source_space != NEW_SPACE &&
       source_space != CODE_SPACE);
  while (current < limit) {
    byte data = source_.Get();
    switch (data) {
#define CASE_STATEMENT(where, how, within, space_number) \
  case where + how + within + space_number: \
    STATIC_ASSERT((where & ~kWhereMask) == 0); \
    STATIC_ASSERT((how & ~kHowToCodeMask) == 0); \
    STATIC_ASSERT((within & ~kWhereToPointMask) == 0); \
    STATIC_ASSERT((space_number & ~kSpaceMask) == 0);

#define CASE_BODY(where, how, within, space_number_if_any) \
  { \
    bool emit_write_barrier = false; \
    bool current_was_incremented = false; \
    int space_number = space_number_if_any == kAnyOldSpace \
                           ? (data & kSpaceMask) \
                           : space_number_if_any; \
    if (where == kNewObject && how == kPlain && within == kStartOfObject) { \
      ReadObject(space_number, current); \
      emit_write_barrier = (space_number == NEW_SPACE); \
    } else { \
      Object* new_object = NULL; /* May not be a real Object pointer. */ \
      if (where == kNewObject) { \
        ReadObject(space_number, &new_object); \
      } else if (where == kBackref) { \
        emit_write_barrier = (space_number == NEW_SPACE); \
        new_object = GetBackReferencedObject(data & kSpaceMask); \
      } else if (where == kBackrefWithSkip) { \
        int skip = source_.GetInt(); \
        current = reinterpret_cast<Object**>( \
            reinterpret_cast<Address>(current) + skip); \
        emit_write_barrier = (space_number == NEW_SPACE); \
        new_object = GetBackReferencedObject(data & kSpaceMask); \
      } else if (where == kRootArray) { \
        int id = source_.GetInt(); \
        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
        new_object = isolate->heap()->root(root_index); \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
      } else if (where == kPartialSnapshotCache) { \
        int cache_index = source_.GetInt(); \
        new_object = isolate->partial_snapshot_cache()->at(cache_index); \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
      } else if (where == kExternalReference) { \
        int skip = source_.GetInt(); \
        current = reinterpret_cast<Object**>( \
            reinterpret_cast<Address>(current) + skip); \
        int reference_id = source_.GetInt(); \
        Address address = external_reference_table_->address(reference_id); \
        new_object = reinterpret_cast<Object*>(address); \
      } else if (where == kAttachedReference) { \
        int index = source_.GetInt(); \
        new_object = *attached_objects_[index]; \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
      } else { \
        DCHECK(where == kBuiltin); \
        DCHECK(deserializing_user_code()); \
        int builtin_id = source_.GetInt(); \
        DCHECK_LE(0, builtin_id); \
        DCHECK_LT(builtin_id, Builtins::builtin_count); \
        Builtins::Name name = static_cast<Builtins::Name>(builtin_id); \
        new_object = isolate->builtins()->builtin(name); \
        emit_write_barrier = false; \
      } \
      if (within == kInnerPointer) { \
        if (space_number != CODE_SPACE || new_object->IsCode()) { \
          Code* new_code_object = reinterpret_cast<Code*>(new_object); \
          new_object = \
              reinterpret_cast<Object*>(new_code_object->instruction_start()); \
        } else { \
          DCHECK(space_number == CODE_SPACE); \
          Cell* cell = Cell::cast(new_object); \
          new_object = reinterpret_cast<Object*>(cell->ValueAddress()); \
        } \
      } \
      if (how == kFromCode) { \
        Address location_of_branch_data = reinterpret_cast<Address>(current); \
        Assembler::deserialization_set_special_target_at( \
            isolate, location_of_branch_data, \
            Code::cast(HeapObject::FromAddress(current_object_address)), \
            reinterpret_cast<Address>(new_object)); \
        location_of_branch_data += Assembler::kSpecialTargetSize; \
        current = reinterpret_cast<Object**>(location_of_branch_data); \
        current_was_incremented = true; \
      } else { \
        UnalignedCopy(current, &new_object); \
      } \
    } \
    if (emit_write_barrier && write_barrier_needed) { \
      Address current_address = reinterpret_cast<Address>(current); \
      SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address)); \
      isolate->heap()->RecordWrite( \
          HeapObject::FromAddress(current_object_address), \
          static_cast<int>(current_address - current_object_address), \
          *reinterpret_cast<Object**>(current_address)); \
    } \
    if (!current_was_incremented) { \
      current++; \
    } \
    break; \
  }

// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with fall-through cases
// and one body.
#define ALL_SPACES(where, how, within) \
  CASE_STATEMENT(where, how, within, NEW_SPACE) \
  CASE_BODY(where, how, within, NEW_SPACE) \
  CASE_STATEMENT(where, how, within, OLD_SPACE) \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_STATEMENT(where, how, within, MAP_SPACE) \
  CASE_STATEMENT(where, how, within, LO_SPACE) \
  CASE_BODY(where, how, within, kAnyOldSpace)

#define FOUR_CASES(byte_code) \
  case byte_code: \
  case byte_code + 1: \
  case byte_code + 2: \
  case byte_code + 3:

#define SIXTEEN_CASES(byte_code) \
  FOUR_CASES(byte_code) \
  FOUR_CASES(byte_code + 4) \
  FOUR_CASES(byte_code + 8) \
  FOUR_CASES(byte_code + 12)

#define SINGLE_CASE(where, how, within, space) \
  CASE_STATEMENT(where, how, within, space) \
  CASE_BODY(where, how, within, space)

      // Deserialize a new object and write a pointer to it to the current
      // object.
      ALL_SPACES(kNewObject, kPlain, kStartOfObject)
      // Support for direct instruction pointers in functions. It's an inner
      // pointer because it points at the entry point, not at the start of the
      // code object.
      SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
      // Deserialize a new code object and write a pointer to its first
      // instruction to the current code object.
      ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
      // Find a recently deserialized object using its offset from the current
      // allocation point and write a pointer to it to the current object.
      ALL_SPACES(kBackref, kPlain, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
#if V8_CODE_EMBEDS_OBJECT_POINTER
      // Deserialize a new object from pointer found in code and write
      // a pointer to it to the current object. Required only for MIPS, PPC, ARM
      // or S390 with embedded constant pool, and omitted on the other
      // architectures because it is fully unrolled and would cause bloat.
      ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to it to the current
      // object. Required only for MIPS, PPC, ARM or S390 with embedded
      // constant pool.
      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to its first instruction
      // to the current code object or the instruction pointer in a function
      // object.
      ALL_SPACES(kBackref, kFromCode, kInnerPointer)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
      ALL_SPACES(kBackref, kPlain, kInnerPointer)
      ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
#if V8_CODE_EMBEDS_OBJECT_POINTER
      // Find an object in the roots array and write a pointer to it in code.
      SINGLE_CASE(kRootArray, kFromCode, kStartOfObject, 0)
#endif
      // Find an object in the partial snapshots cache and write a pointer to it
      // to the current object.
      SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
      // Find a code entry in the partial snapshots cache and
      // write a pointer to it to the current object.
      SINGLE_CASE(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
      // Find an external reference and write a pointer to it to the current
      // object.
      SINGLE_CASE(kExternalReference, kPlain, kStartOfObject, 0)
      // Find an external reference and write a pointer to it in the current
      // code object.
      SINGLE_CASE(kExternalReference, kFromCode, kStartOfObject, 0)
      // Find an object in the attached references and write a pointer to it to
      // the current object.
      SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
      SINGLE_CASE(kAttachedReference, kPlain, kInnerPointer, 0)
      SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
      // Find a builtin and write a pointer to it to the current object.
      SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
      SINGLE_CASE(kBuiltin, kPlain, kInnerPointer, 0)
      SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)

#undef CASE_STATEMENT
#undef CASE_BODY
#undef ALL_SPACES

      case kSkip: {
        int size = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + size);
        break;
      }

      case kInternalReferenceEncoded:
      case kInternalReference: {
        // Internal reference address is not encoded via skip, but by offset
        // from code entry.
        int pc_offset = source_.GetInt();
        int target_offset = source_.GetInt();
        Code* code =
            Code::cast(HeapObject::FromAddress(current_object_address));
        DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size());
        DCHECK(0 <= target_offset && target_offset <= code->instruction_size());
        Address pc = code->entry() + pc_offset;
        Address target = code->entry() + target_offset;
        Assembler::deserialization_set_target_internal_reference_at(
            isolate, pc, target, data == kInternalReference
                                     ? RelocInfo::INTERNAL_REFERENCE
                                     : RelocInfo::INTERNAL_REFERENCE_ENCODED);
        break;
      }

      case kNop:
        break;

      case kNextChunk: {
        int space = source_.Get();
        DCHECK(space < kNumberOfPreallocatedSpaces);
        int chunk_index = current_chunk_[space];
        const Heap::Reservation& reservation = reservations_[space];
        // Make sure the current chunk is indeed exhausted.
        CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
        // Move to next reserved chunk.
        chunk_index = ++current_chunk_[space];
        CHECK_LT(chunk_index, reservation.length());
        high_water_[space] = reservation[chunk_index].start;
        break;
      }

      case kDeferred: {
        // Deferred can only occur right after the heap object header.
        DCHECK(current == reinterpret_cast<Object**>(current_object_address +
                                                     kPointerSize));
        HeapObject* obj = HeapObject::FromAddress(current_object_address);
        // If the deferred object is a map, its instance type may be used
        // during deserialization. Initialize it with a temporary value.
        if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
        current = limit;
        return false;
      }

      case kSynchronize:
        // If we get here then that indicates that you have a mismatch between
        // the number of GC roots when serializing and deserializing.
        CHECK(false);
        break;

      case kNativesStringResource:
        current = CopyInNativesSource(Natives::GetScriptSource(source_.Get()),
                                      current);
        break;

      case kExtraNativesStringResource:
        current = CopyInNativesSource(
            ExtraNatives::GetScriptSource(source_.Get()), current);
        break;

      // Deserialize raw data of variable length.
      case kVariableRawData: {
        int size_in_bytes = source_.GetInt();
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        source_.CopyRaw(raw_data_out, size_in_bytes);
        break;
      }

      case kVariableRepeat: {
        int repeats = source_.GetInt();
        Object* object = current[-1];
        DCHECK(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
        break;
      }

      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2:
        SetAlignment(data);
        break;

      STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
      STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
      SIXTEEN_CASES(kRootArrayConstantsWithSkip)
      SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
        int skip = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + skip);
        // Fall through.
      }

      SIXTEEN_CASES(kRootArrayConstants)
      SIXTEEN_CASES(kRootArrayConstants + 16) {
        int id = data & kRootArrayConstantsMask;
        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
        Object* object = isolate->heap()->root(root_index);
        DCHECK(!isolate->heap()->InNewSpace(object));
        UnalignedCopy(current++, &object);
        break;
      }

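      // Hot objects form a small cache (kNumberOfHotObjects entries) of
      // recently referenced objects, so the serializer can refer to them with
      // a single opcode byte; see hot_objects_.Add() in
      // GetBackReferencedObject() above.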
      STATIC_ASSERT(kNumberOfHotObjects == 8);
      FOUR_CASES(kHotObjectWithSkip)
      FOUR_CASES(kHotObjectWithSkip + 4) {
        int skip = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<Address>(current) + skip);
        // Fall through.
      }

      FOUR_CASES(kHotObject)
      FOUR_CASES(kHotObject + 4) {
        int index = data & kHotObjectMask;
        Object* hot_object = hot_objects_.Get(index);
        UnalignedCopy(current, &hot_object);
        if (write_barrier_needed) {
          Address current_address = reinterpret_cast<Address>(current);
          SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
          isolate->heap()->RecordWrite(
              HeapObject::FromAddress(current_object_address),
              static_cast<int>(current_address - current_object_address),
              hot_object);
        }
        current++;
        break;
      }

      // Deserialize raw data of fixed length from 1 to 32 words.
      STATIC_ASSERT(kNumberOfFixedRawData == 32);
      SIXTEEN_CASES(kFixedRawData)
      SIXTEEN_CASES(kFixedRawData + 16) {
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
        source_.CopyRaw(raw_data_out, size_in_bytes);
        current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes);
        break;
      }

      STATIC_ASSERT(kNumberOfFixedRepeat == 16);
      SIXTEEN_CASES(kFixedRepeat) {
        int repeats = data - kFixedRepeatStart;
        Object* object;
        UnalignedCopy(&object, current - 1);
        DCHECK(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
        break;
      }

#undef SIXTEEN_CASES
#undef FOUR_CASES
#undef SINGLE_CASE

      default:
        CHECK(false);
    }
  }
  CHECK_EQ(limit, current);
  return true;
}
}  // namespace internal
}  // namespace v8