Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1 | // Copyright 2006-2008 the V8 project authors. All rights reserved. |
| 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
| 4 | |
| 5 | // The common functionality when building with or without snapshots. |
| 6 | |
| 7 | #include "src/snapshot/snapshot.h" |
| 8 | |
| 9 | #include "src/api.h" |
| 10 | #include "src/base/platform/platform.h" |
| 11 | #include "src/full-codegen/full-codegen.h" |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame^] | 12 | #include "src/snapshot/deserializer.h" |
| 13 | #include "src/snapshot/snapshot-source-sink.h" |
| 14 | #include "src/version.h" |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 15 | |
| 16 | namespace v8 { |
| 17 | namespace internal { |
| 18 | |
#ifdef DEBUG
// A snapshot blob is only considered valid if it yields both non-empty
// startup data and non-empty context data.
bool Snapshot::SnapshotIsValid(v8::StartupData* snapshot_blob) {
  if (Snapshot::ExtractStartupData(snapshot_blob).is_empty()) return false;
  return !Snapshot::ExtractContextData(snapshot_blob).is_empty();
}
#endif  // DEBUG
| 25 | |
| 26 | |
| 27 | bool Snapshot::HaveASnapshotToStartFrom(Isolate* isolate) { |
| 28 | // Do not use snapshots if the isolate is used to create snapshots. |
| 29 | return isolate->snapshot_blob() != NULL && |
| 30 | isolate->snapshot_blob()->data != NULL; |
| 31 | } |
| 32 | |
| 33 | |
| 34 | bool Snapshot::EmbedsScript(Isolate* isolate) { |
| 35 | if (!isolate->snapshot_available()) return false; |
| 36 | return ExtractMetadata(isolate->snapshot_blob()).embeds_script(); |
| 37 | } |
| 38 | |
| 39 | |
| 40 | uint32_t Snapshot::SizeOfFirstPage(Isolate* isolate, AllocationSpace space) { |
| 41 | DCHECK(space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE); |
| 42 | if (!isolate->snapshot_available()) { |
| 43 | return static_cast<uint32_t>(MemoryAllocator::PageAreaSize(space)); |
| 44 | } |
| 45 | uint32_t size; |
| 46 | int offset = kFirstPageSizesOffset + (space - FIRST_PAGED_SPACE) * kInt32Size; |
| 47 | memcpy(&size, isolate->snapshot_blob()->data + offset, kInt32Size); |
| 48 | return size; |
| 49 | } |
| 50 | |
| 51 | |
| 52 | bool Snapshot::Initialize(Isolate* isolate) { |
| 53 | if (!isolate->snapshot_available()) return false; |
| 54 | base::ElapsedTimer timer; |
| 55 | if (FLAG_profile_deserialization) timer.Start(); |
| 56 | |
| 57 | const v8::StartupData* blob = isolate->snapshot_blob(); |
| 58 | Vector<const byte> startup_data = ExtractStartupData(blob); |
| 59 | SnapshotData snapshot_data(startup_data); |
| 60 | Deserializer deserializer(&snapshot_data); |
| 61 | bool success = isolate->Init(&deserializer); |
| 62 | if (FLAG_profile_deserialization) { |
| 63 | double ms = timer.Elapsed().InMillisecondsF(); |
| 64 | int bytes = startup_data.length(); |
| 65 | PrintF("[Deserializing isolate (%d bytes) took %0.3f ms]\n", bytes, ms); |
| 66 | } |
| 67 | return success; |
| 68 | } |
| 69 | |
| 70 | |
| 71 | MaybeHandle<Context> Snapshot::NewContextFromSnapshot( |
| 72 | Isolate* isolate, Handle<JSGlobalProxy> global_proxy) { |
| 73 | if (!isolate->snapshot_available()) return Handle<Context>(); |
| 74 | base::ElapsedTimer timer; |
| 75 | if (FLAG_profile_deserialization) timer.Start(); |
| 76 | |
| 77 | const v8::StartupData* blob = isolate->snapshot_blob(); |
| 78 | Vector<const byte> context_data = ExtractContextData(blob); |
| 79 | SnapshotData snapshot_data(context_data); |
| 80 | Deserializer deserializer(&snapshot_data); |
| 81 | |
| 82 | MaybeHandle<Object> maybe_context = |
| 83 | deserializer.DeserializePartial(isolate, global_proxy); |
| 84 | Handle<Object> result; |
| 85 | if (!maybe_context.ToHandle(&result)) return MaybeHandle<Context>(); |
| 86 | CHECK(result->IsContext()); |
| 87 | if (FLAG_profile_deserialization) { |
| 88 | double ms = timer.Elapsed().InMillisecondsF(); |
| 89 | int bytes = context_data.length(); |
| 90 | PrintF("[Deserializing context (%d bytes) took %0.3f ms]\n", bytes, ms); |
| 91 | } |
| 92 | return Handle<Context>::cast(result); |
| 93 | } |
| 94 | |
| 95 | |
// Computes, for each paged space, the size of the heap's first page from the
// space reservations recorded in the startup and context snapshots. Writes
// one uint32_t per paged space into |sizes_out| (indexed from
// FIRST_PAGED_SPACE). |is_default_snapshot| is only used to assert that the
// vanilla (non-script-embedding) snapshot never needs multiple chunks.
void CalculateFirstPageSizes(bool is_default_snapshot,
                             const SnapshotData& startup_snapshot,
                             const SnapshotData& context_snapshot,
                             uint32_t* sizes_out) {
  Vector<const SerializedData::Reservation> startup_reservations =
      startup_snapshot.Reservations();
  Vector<const SerializedData::Reservation> context_reservations =
      context_snapshot.Reservations();
  // Each space owns a run of chunks in the flat reservation arrays,
  // terminated by a chunk whose is_last() bit is set. These indices walk
  // both arrays in lockstep with the space loop below.
  int startup_index = 0;
  int context_index = 0;

  if (FLAG_profile_deserialization) {
    // Profiling only: report total reserved bytes per isolate and context.
    int startup_total = 0;
    int context_total = 0;
    for (auto& reservation : startup_reservations) {
      startup_total += reservation.chunk_size();
    }
    for (auto& reservation : context_reservations) {
      context_total += reservation.chunk_size();
    }
    PrintF(
        "Deserialization will reserve:\n"
        "%10d bytes per isolate\n"
        "%10d bytes per context\n",
        startup_total, context_total);
  }

  for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
    bool single_chunk = true;
    // Advance to the last chunk of this space in both reservation lists,
    // noting whether either side needed more than one chunk.
    while (!startup_reservations[startup_index].is_last()) {
      single_chunk = false;
      startup_index++;
    }
    while (!context_reservations[context_index].is_last()) {
      single_chunk = false;
      context_index++;
    }

    uint32_t required = kMaxUInt32;
    if (single_chunk) {
      // If both the startup snapshot data and the context snapshot data on
      // this space fit in a single page, then we consider limiting the size
      // of the first page. For this, we add the chunk sizes and some extra
      // allowance. This way we achieve a smaller startup memory footprint.
      // NOTE(review): the context chunk size is counted twice here —
      // presumably headroom for deserializing more than one context into
      // the same space; confirm against the serializer's expectations.
      required = (startup_reservations[startup_index].chunk_size() +
                  2 * context_reservations[context_index].chunk_size()) +
                 Page::kObjectStartOffset;
      // Add a small allowance to the code space for small scripts.
      if (space == CODE_SPACE) required += 32 * KB;
    } else {
      // We expect the vanilla snapshot to only require one page per space.
      DCHECK(!is_default_snapshot);
    }

    if (space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE) {
      // Cap the requested size at the maximum page area for this space.
      uint32_t max_size =
          MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space));
      sizes_out[space - FIRST_PAGED_SPACE] = Min(required, max_size);
    } else {
      // Non-paged spaces must always fit into a single chunk.
      DCHECK(single_chunk);
    }
    // Step past the is_last() chunk onto the next space's run.
    startup_index++;
    context_index++;
  }

  // Every reservation entry must have been consumed exactly once.
  DCHECK_EQ(startup_reservations.length(), startup_index);
  DCHECK_EQ(context_reservations.length(), context_index);
}
| 164 | |
| 165 | |
// Assembles a monolithic snapshot blob from serialized startup and context
// data. Blob layout (all integer fields are 32-bit):
//   [kMetadataOffset]        metadata word
//   [kFirstPageSizesOffset]  first-page size per paged space
//   [kStartupLengthOffset]   byte length of the startup data
//   [kStartupDataOffset]     startup snapshot bytes
//   [ContextOffset(...)]     context snapshot bytes (to end of blob)
// The returned StartupData points at a buffer allocated with new[]; the
// caller takes ownership and must delete[] it.
v8::StartupData Snapshot::CreateSnapshotBlob(
    const i::StartupSerializer& startup_ser,
    const i::PartialSerializer& context_ser, Snapshot::Metadata metadata) {
  SnapshotData startup_snapshot(startup_ser);
  SnapshotData context_snapshot(context_ser);
  Vector<const byte> startup_data = startup_snapshot.RawData();
  Vector<const byte> context_data = context_snapshot.RawData();

  uint32_t first_page_sizes[kNumPagedSpaces];

  // A default snapshot is one that does not embed a script.
  CalculateFirstPageSizes(!metadata.embeds_script(), startup_snapshot,
                          context_snapshot, first_page_sizes);

  int startup_length = startup_data.length();
  int context_length = context_data.length();
  int context_offset = ContextOffset(startup_length);

  // The context data runs from context_offset to the end of the blob.
  int length = context_offset + context_length;
  char* data = new char[length];

  // Fill in header fields and both data sections at their fixed offsets.
  memcpy(data + kMetadataOffset, &metadata.RawValue(), kInt32Size);
  memcpy(data + kFirstPageSizesOffset, first_page_sizes,
         kNumPagedSpaces * kInt32Size);
  memcpy(data + kStartupLengthOffset, &startup_length, kInt32Size);
  memcpy(data + kStartupDataOffset, startup_data.begin(), startup_length);
  memcpy(data + context_offset, context_data.begin(), context_length);
  v8::StartupData result = {data, length};

  if (FLAG_profile_deserialization) {
    PrintF(
        "Snapshot blob consists of:\n"
        "%10d bytes for startup\n"
        "%10d bytes for context\n",
        startup_length, context_length);
  }
  return result;
}
| 203 | |
| 204 | |
| 205 | Snapshot::Metadata Snapshot::ExtractMetadata(const v8::StartupData* data) { |
| 206 | uint32_t raw; |
| 207 | memcpy(&raw, data->data + kMetadataOffset, kInt32Size); |
| 208 | return Metadata(raw); |
| 209 | } |
| 210 | |
| 211 | |
| 212 | Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) { |
| 213 | DCHECK_LT(kIntSize, data->raw_size); |
| 214 | int startup_length; |
| 215 | memcpy(&startup_length, data->data + kStartupLengthOffset, kInt32Size); |
| 216 | DCHECK_LT(startup_length, data->raw_size); |
| 217 | const byte* startup_data = |
| 218 | reinterpret_cast<const byte*>(data->data + kStartupDataOffset); |
| 219 | return Vector<const byte>(startup_data, startup_length); |
| 220 | } |
| 221 | |
| 222 | |
| 223 | Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data) { |
| 224 | DCHECK_LT(kIntSize, data->raw_size); |
| 225 | int startup_length; |
| 226 | memcpy(&startup_length, data->data + kStartupLengthOffset, kIntSize); |
| 227 | int context_offset = ContextOffset(startup_length); |
| 228 | const byte* context_data = |
| 229 | reinterpret_cast<const byte*>(data->data + context_offset); |
| 230 | DCHECK_LT(context_offset, data->raw_size); |
| 231 | int context_length = data->raw_size - context_offset; |
| 232 | return Vector<const byte>(context_data, context_length); |
| 233 | } |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame^] | 234 | |
// Builds the serialized byte representation of a snapshot from a finished
// serializer. Resulting layout:
//   [header: magic number, version checksum, reservation count,
//            payload length]
//   [reservation chunk sizes, one 32-bit entry each]
//   [payload bytes]
SnapshotData::SnapshotData(const Serializer& ser) {
  // Raw pointers into payload/reservation storage must not be invalidated
  // by GC while we copy from them.
  DisallowHeapAllocation no_gc;
  List<Reservation> reservations;
  ser.EncodeReservations(&reservations);
  const List<byte>& payload = ser.sink()->data();

  // Calculate sizes.
  int reservation_size = reservations.length() * kInt32Size;
  int size = kHeaderSize + reservation_size + payload.length();

  // Allocate backing store and create result data.
  AllocateData(size);

  // Set header values.
  SetMagicNumber(ser.isolate());
  SetHeaderValue(kCheckSumOffset, Version::Hash());
  SetHeaderValue(kNumReservationsOffset, reservations.length());
  SetHeaderValue(kPayloadLengthOffset, payload.length());

  // Copy reservation chunk sizes.
  CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
            reservation_size);

  // Copy serialized data.
  CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(),
            static_cast<size_t>(payload.length()));
}
| 262 | |
| 263 | bool SnapshotData::IsSane() { |
| 264 | return GetHeaderValue(kCheckSumOffset) == Version::Hash(); |
| 265 | } |
| 266 | |
| 267 | Vector<const SerializedData::Reservation> SnapshotData::Reservations() const { |
| 268 | return Vector<const Reservation>( |
| 269 | reinterpret_cast<const Reservation*>(data_ + kHeaderSize), |
| 270 | GetHeaderValue(kNumReservationsOffset)); |
| 271 | } |
| 272 | |
| 273 | Vector<const byte> SnapshotData::Payload() const { |
| 274 | int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size; |
| 275 | const byte* payload = data_ + kHeaderSize + reservations_size; |
| 276 | int length = GetHeaderValue(kPayloadLengthOffset); |
| 277 | DCHECK_EQ(data_ + size_, payload + length); |
| 278 | return Vector<const byte>(payload, length); |
| 279 | } |
| 280 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 281 | } // namespace internal |
| 282 | } // namespace v8 |