blob: 9ffc4149ab57309e259f2b3171c2b001b35750fd [file] [log] [blame]
Igor Murashkin37743352014-11-13 14:38:00 -08001/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <algorithm>
#include <fstream>
#include <functional>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "android-base/stringprintf.h"

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "backtrace/BacktraceMap.h"
#include "base/unix_file/fd_file.h"
#include "cmdline.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "image.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
Igor Murashkin37743352014-11-13 14:38:00 -080048
49namespace art {
50
Andreas Gampe46ee31b2016-12-14 10:11:49 -080051using android::base::StringPrintf;
52
David Sehrb4005f02017-06-20 19:11:40 -070053namespace {
54
// Upper bound on how many sample entry addresses are printed per dump section.
constexpr size_t kMaxAddressPrint = 5;

// Which process a dirty entry was observed in.
enum class ProcessType {
  kZygote,
  kRemote
};

// Which remote process(es) the image is being diffed against.
enum class RemoteProcesses {
  kImageOnly,
  kZygoteOnly,
  kImageAndZygote
};
67
// Aggregated page-level statistics for one mapped image region, filled in
// while comparing the local image mapping against a remote process' copy.
struct MappingData {
  // The count of pages that are considered dirty by the OS.
  size_t dirty_pages = 0;
  // The count of pages that differ by at least one byte.
  size_t different_pages = 0;
  // The count of differing bytes.
  size_t different_bytes = 0;
  // The count of differing four-byte units.
  size_t different_int32s = 0;
  // The count of pages that have mapping count == 1.
  size_t private_pages = 0;
  // The count of private pages that are also dirty.
  size_t private_dirty_pages = 0;
  // The count of pages that are marked dirty but do not differ.
  size_t false_dirty_pages = 0;
  // Set of the local virtual page indices that are dirty.
  std::set<size_t> dirty_page_set;
};
86
87static std::string GetClassDescriptor(mirror::Class* klass)
88 REQUIRES_SHARED(Locks::mutator_lock_) {
89 CHECK(klass != nullptr);
90
91 std::string descriptor;
92 const char* descriptor_str = klass->GetDescriptor(&descriptor /*out*/);
93
94 return std::string(descriptor_str);
95}
96
// Renders the value that `field` holds inside `object` as human-readable text.
// Reference fields are printed as a pointer; primitives are printed
// numerically.
// NOTE(review): kPrimFloat/kPrimDouble are read via GetField32/GetField64, so
// floating-point values appear as their raw integer bit patterns — presumably
// intentional for byte-level diffing; confirm before "fixing".
static std::string PrettyFieldValue(ArtField* field, mirror::Object* object)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  std::ostringstream oss;
  switch (field->GetTypeAsPrimitiveType()) {
    case Primitive::kPrimNot: {
      // Read without a read barrier: we want the stored bits, not a
      // to-space-adjusted reference.
      oss << object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
          field->GetOffset());
      break;
    }
    case Primitive::kPrimBoolean: {
      oss << static_cast<bool>(object->GetFieldBoolean<kVerifyNone>(field->GetOffset()));
      break;
    }
    case Primitive::kPrimByte: {
      // Widen to int32_t so the byte prints as a number, not a character.
      oss << static_cast<int32_t>(object->GetFieldByte<kVerifyNone>(field->GetOffset()));
      break;
    }
    case Primitive::kPrimChar: {
      oss << object->GetFieldChar<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimShort: {
      oss << object->GetFieldShort<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimInt: {
      oss << object->GetField32<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimLong: {
      oss << object->GetField64<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimFloat: {
      oss << object->GetField32<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimDouble: {
      oss << object->GetField64<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimVoid: {
      oss << "void";
      break;
    }
  }
  return oss.str();
}
145
// Sorts a map's entries in descending order of (mapped) value and returns them
// as (value, key) pairs.
//
// `value_mapper` extracts the comparable value V from each stored datum D; by
// default it is a static_cast of D to V.
//
// Fix: take the map by const reference — the previous signature took
// `const std::map<K, D>` by value and copied the entire map on every call.
template <typename K, typename V, typename D>
static std::vector<std::pair<V, K>> SortByValueDesc(
    const std::map<K, D>& map,
    std::function<V(const D&)> value_mapper = [](const D& d) { return static_cast<V>(d); }) {
  // Store value->key so that we can use the default sort from pair which
  // sorts by value first and then key.
  std::vector<std::pair<V, K>> value_key_vector;
  value_key_vector.reserve(map.size());  // One allocation instead of O(log n) regrowths.
  for (const auto& kv_pair : map) {
    value_key_vector.push_back(std::make_pair(value_mapper(kv_pair.second), kv_pair.first));
  }
  // Sort in reverse (descending order).
  std::sort(value_key_vector.rbegin(), value_key_vector.rend());
  return value_key_vector;
}
162
163// Fixup a remote pointer that we read from a foreign boot.art to point to our own memory.
164// Returned pointer will point to inside of remote_contents.
165template <typename T>
166static T* FixUpRemotePointer(T* remote_ptr,
167 std::vector<uint8_t>& remote_contents,
168 const backtrace_map_t& boot_map) {
169 if (remote_ptr == nullptr) {
170 return nullptr;
171 }
172
173 uintptr_t remote = reinterpret_cast<uintptr_t>(remote_ptr);
174
175 CHECK_LE(boot_map.start, remote);
176 CHECK_GT(boot_map.end, remote);
177
178 off_t boot_offset = remote - boot_map.start;
179
180 return reinterpret_cast<T*>(&remote_contents[boot_offset]);
181}
182
183template <typename T>
184static T* RemoteContentsPointerToLocal(T* remote_ptr,
185 std::vector<uint8_t>& remote_contents,
186 const ImageHeader& image_header) {
187 if (remote_ptr == nullptr) {
188 return nullptr;
189 }
190
191 uint8_t* remote = reinterpret_cast<uint8_t*>(remote_ptr);
192 ptrdiff_t boot_offset = remote - &remote_contents[0];
193
194 const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header) + boot_offset;
195
196 return reinterpret_cast<T*>(const_cast<uint8_t*>(local_ptr));
197}
198
// Size in bytes of one diffable entry; specialized per entry kind.
template <typename T> size_t EntrySize(T* entry);
// Objects are variably sized; ask the object itself.
template<> size_t EntrySize(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
  return object->SizeOf();
}
// ArtMethods are a fixed-size native struct.
template<> size_t EntrySize(ArtMethod* art_method) REQUIRES_SHARED(Locks::mutator_lock_) {
  return sizeof(*art_method);
}

// Returns true if the two entries differ in at least one byte.
// entry2 is assumed to be at least EntrySize(entry1) bytes long.
template <typename T>
static bool EntriesDiffer(T* entry1, T* entry2) REQUIRES_SHARED(Locks::mutator_lock_) {
  return memcmp(entry1, entry2, EntrySize(entry1)) != 0;
}
211
// State and helpers shared by all entry kinds (mirror::Object, ArtMethod).
// Accumulates which entries are dirty / false-dirty in the remote (and
// optionally zygote) image, plus per-byte-offset dirty counts.
template <typename T>
struct RegionCommon {
 public:
  RegionCommon(std::ostream* os,
               std::vector<uint8_t>* remote_contents,
               std::vector<uint8_t>* zygote_contents,
               const backtrace_map_t& boot_map,
               const ImageHeader& image_header) :
    os_(*os),
    remote_contents_(remote_contents),
    zygote_contents_(zygote_contents),
    boot_map_(boot_map),
    image_header_(image_header),
    different_entries_(0),
    dirty_entry_bytes_(0),
    false_dirty_entry_bytes_(0) {
    // Both buffers must be provided (they may be empty, but never null).
    CHECK(remote_contents != nullptr);
    CHECK(zygote_contents != nullptr);
  }

  // Prints up to kMaxAddressPrint sample dirty-entry addresses, then the
  // per-byte-offset dirty counts sorted by descending count.
  void DumpSamplesAndOffsetCount() {
    os_ << " sample object addresses: ";
    for (size_t i = 0; i < dirty_entries_.size() && i < kMaxAddressPrint; ++i) {
      T* entry = dirty_entries_[i];
      os_ << reinterpret_cast<void*>(entry) << ", ";
    }
    os_ << "\n";
    os_ << " dirty byte +offset:count list = ";
    std::vector<std::pair<size_t, off_t>> field_dirty_count_sorted =
        SortByValueDesc<off_t, size_t, size_t>(field_dirty_count_);
    for (const std::pair<size_t, off_t>& pair : field_dirty_count_sorted) {
      off_t offset = pair.second;
      size_t count = pair.first;
      os_ << "+" << offset << ":" << count << ", ";
    }
    os_ << "\n";
  }

  // Accessors for the accumulated statistics.
  size_t GetDifferentEntryCount() const { return different_entries_; }
  size_t GetDirtyEntryBytes() const { return dirty_entry_bytes_; }
  size_t GetFalseDirtyEntryCount() const { return false_dirty_entries_.size(); }
  size_t GetFalseDirtyEntryBytes() const { return false_dirty_entry_bytes_; }
  size_t GetZygoteDirtyEntryCount() const { return zygote_dirty_entries_.size(); }

 protected:
  // Returns true if any page overlapped by `entry` appears in `dirty_pages`
  // (a set of local virtual page indices, i.e. address / kPageSize).
  bool IsEntryOnDirtyPage(T* entry, const std::set<size_t>& dirty_pages) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t size = EntrySize(entry);
    size_t page_off = 0;
    size_t current_page_idx;
    uintptr_t entry_address = reinterpret_cast<uintptr_t>(entry);
    // Iterate every page this entry belongs to
    do {
      current_page_idx = entry_address / kPageSize + page_off;
      if (dirty_pages.find(current_page_idx) != dirty_pages.end()) {
        // This entry is on a dirty page
        return true;
      }
      page_off++;
    } while ((current_page_idx * kPageSize) < RoundUp(entry_address + size, kObjectAlignment));
    return false;
  }

  void AddZygoteDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    zygote_dirty_entries_.insert(entry);
  }

  void AddImageDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    image_dirty_entries_.insert(entry);
  }

  // Records an entry that lies on a dirty page but is itself byte-identical.
  void AddFalseDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    false_dirty_entries_.push_back(entry);
    false_dirty_entry_bytes_ += EntrySize(entry);
  }

  // The output stream to write to.
  std::ostream& os_;
  // The byte contents of the remote (image) process' image.
  std::vector<uint8_t>* remote_contents_;
  // The byte contents of the zygote process' image.
  std::vector<uint8_t>* zygote_contents_;
  const backtrace_map_t& boot_map_;
  const ImageHeader& image_header_;

  // Count of entries that are different.
  size_t different_entries_;

  // Local entries that are dirty (differ in at least one byte).
  size_t dirty_entry_bytes_;
  std::vector<T*> dirty_entries_;

  // Local entries that are clean, but located on dirty pages.
  size_t false_dirty_entry_bytes_;
  std::vector<T*> false_dirty_entries_;

  // Image dirty entries
  // If zygote_pid_only_ == true, these are shared dirty entries in the zygote.
  // If zygote_pid_only_ == false, these are private dirty entries in the application.
  std::set<T*> image_dirty_entries_;

  // Zygote dirty entries (probably private dirty).
  // We only add entries here if they differed in both the image and the zygote, so
  // they are probably private dirty.
  std::set<T*> zygote_dirty_entries_;

  // How many dirty entries had a differing byte at each offset within T.
  std::map<off_t /* field offset */, size_t /* count */> field_dirty_count_;

 private:
  DISALLOW_COPY_AND_ASSIGN(RegionCommon);
};
323
// Per-entry-type analysis hooks; only the explicit specializations below
// (mirror::Object, ArtMethod) are ever instantiated.
template <typename T>
class RegionSpecializedBase : public RegionCommon<T> {
};
327
// Region analysis for mirror::Objects
template<>
class RegionSpecializedBase<mirror::Object> : public RegionCommon<mirror::Object> {
 public:
  // If dump_dirty_objects is set, private-dirty class objects seen by
  // DiffEntryContents are remembered and later printed by DumpDirtyObjects().
  RegionSpecializedBase(std::ostream* os,
                        std::vector<uint8_t>* remote_contents,
                        std::vector<uint8_t>* zygote_contents,
                        const backtrace_map_t& boot_map,
                        const ImageHeader& image_header,
                        bool dump_dirty_objects)
      : RegionCommon<mirror::Object>(os, remote_contents, zygote_contents, boot_map, image_header),
        os_(*os),
        dump_dirty_objects_(dump_dirty_objects) { }

  // Asserts that `current` points at a plausible mirror::Object: correctly
  // aligned, non-null class, and (under Baker CC) a sane read-barrier state.
  void CheckEntrySanity(const uint8_t* current) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK_ALIGNED(current, kObjectAlignment);
    mirror::Object* entry = reinterpret_cast<mirror::Object*>(const_cast<uint8_t*>(current));
    // Sanity check that we are reading a real mirror::Object
    CHECK(entry->GetClass() != nullptr) << "Image object at address "
                                        << entry
                                        << " has null class";
    if (kUseBakerReadBarrier) {
      entry->AssertReadBarrierState();
    }
  }

  // Objects are laid out back-to-back, each rounded up to kObjectAlignment.
  mirror::Object* GetNextEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    uint8_t* next =
        reinterpret_cast<uint8_t*>(entry) + RoundUp(EntrySize(entry), kObjectAlignment);
    return reinterpret_cast<mirror::Object*>(next);
  }

  void VisitEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Unconditionally store the class descriptor in case we need it later
    mirror::Class* klass = entry->GetClass();
    class_data_[klass].descriptor = GetClassDescriptor(klass);
  }

  void AddCleanEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    class_data_[entry->GetClass()].AddCleanObject();
  }

  // Clean object on a dirty page: counted both globally (base class) and
  // per-class.
  void AddFalseDirtyEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    RegionCommon<mirror::Object>::AddFalseDirtyEntry(entry);
    class_data_[entry->GetClass()].AddFalseDirtyObject(entry);
  }

  // Records a dirty object. Per-byte offset counts and sample addresses are
  // only collected for java.lang.Class instances (see IsClassClass branch);
  // per-class aggregates are collected for everything.
  void AddDirtyEntry(mirror::Object* entry, mirror::Object* entry_remote)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t entry_size = EntrySize(entry);
    ++different_entries_;
    dirty_entry_bytes_ += entry_size;
    // Log dirty count and objects for class objects only.
    mirror::Class* klass = entry->GetClass();
    if (klass->IsClassClass()) {
      // Increment counts for the fields that are dirty
      const uint8_t* current = reinterpret_cast<const uint8_t*>(entry);
      const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(entry_remote);
      for (size_t i = 0; i < entry_size; ++i) {
        if (current[i] != current_remote[i]) {
          field_dirty_count_[i]++;
        }
      }
      dirty_entries_.push_back(entry);
    }
    class_data_[klass].AddDirtyObject(entry, entry_remote);
  }

  // Prints a byte-level diff of one dirty object against `base_ptr` (either
  // the local image bytes or the zygote's copy), attributing each differing
  // byte to an instance field, static field, or array element where possible.
  // When log_dirty_objects is set (and dumping was requested), objects with
  // dirty static fields are remembered for DumpDirtyObjects().
  void DiffEntryContents(mirror::Object* entry,
                         uint8_t* remote_bytes,
                         const uint8_t* base_ptr,
                         bool log_dirty_objects)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* tabs = "    ";
    // Attempt to find fields for all dirty bytes.
    mirror::Class* klass = entry->GetClass();
    if (entry->IsClass()) {
      os_ << tabs
          << "Class " << mirror::Class::PrettyClass(entry->AsClass()) << " " << entry << "\n";
    } else {
      os_ << tabs
          << "Instance of " << mirror::Class::PrettyClass(klass) << " " << entry << "\n";
    }

    std::unordered_set<ArtField*> dirty_instance_fields;
    std::unordered_set<ArtField*> dirty_static_fields;
    // Examine the bytes comprising the Object, computing which fields are dirty
    // and recording them for later display. If the Object is an array object,
    // compute the dirty entries.
    mirror::Object* remote_entry = reinterpret_cast<mirror::Object*>(remote_bytes);
    for (size_t i = 0, count = entry->SizeOf(); i < count; ++i) {
      if (base_ptr[i] != remote_bytes[i]) {
        ArtField* field = ArtField::FindInstanceFieldWithOffset</*exact*/false>(klass, i);
        if (field != nullptr) {
          dirty_instance_fields.insert(field);
        } else if (entry->IsClass()) {
          field = ArtField::FindStaticFieldWithOffset</*exact*/false>(entry->AsClass(), i);
          if (field != nullptr) {
            dirty_static_fields.insert(field);
          }
        }
        if (field == nullptr) {
          if (klass->IsArrayClass()) {
            mirror::Class* component_type = klass->GetComponentType();
            Primitive::Type primitive_type = component_type->GetPrimitiveType();
            size_t component_size = Primitive::ComponentSize(primitive_type);
            size_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
            if (i >= data_offset) {
              os_ << tabs << "Dirty array element " << (i - data_offset) / component_size << "\n";
              // Skip to next element to prevent spam.
              i += component_size - 1;
              continue;
            }
          }
          os_ << tabs << "No field for byte offset " << i << "\n";
        }
      }
    }
    // Dump different fields.
    if (!dirty_instance_fields.empty()) {
      os_ << tabs << "Dirty instance fields " << dirty_instance_fields.size() << "\n";
      for (ArtField* field : dirty_instance_fields) {
        os_ << tabs << ArtField::PrettyField(field)
            << " original=" << PrettyFieldValue(field, entry)
            << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
      }
    }
    if (!dirty_static_fields.empty()) {
      if (dump_dirty_objects_ && log_dirty_objects) {
        dirty_objects_.insert(entry);
      }
      os_ << tabs << "Dirty static fields " << dirty_static_fields.size() << "\n";
      for (ArtField* field : dirty_static_fields) {
        os_ << tabs << ArtField::PrettyField(field)
            << " original=" << PrettyFieldValue(field, entry)
            << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
      }
    }
    os_ << "\n";
  }

  // Prints the descriptors of the recorded private-dirty class objects.
  void DumpDirtyObjects() REQUIRES_SHARED(Locks::mutator_lock_) {
    for (mirror::Object* obj : dirty_objects_) {
      if (obj->IsClass()) {
        os_ << "Private dirty object: " << obj->AsClass()->PrettyDescriptor() << "\n";
      }
    }
  }

  // Dumps per-class dirty-object statistics, plus field-offset counts and
  // class status for java.lang.Class instances.
  void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto dirty_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.dirty_object_count; });
    os_ << "\n" << "  Dirty object count by class:\n";
    for (const auto& vk_pair : dirty_object_class_values) {
      size_t dirty_object_count = vk_pair.first;
      mirror::Class* klass = vk_pair.second;
      ClassData& class_data = class_data_[klass];
      size_t object_sizes = class_data.dirty_object_size_in_bytes;
      float avg_dirty_bytes_per_class =
          class_data.dirty_object_byte_count * 1.0f / object_sizes;
      float avg_object_size = object_sizes * 1.0f / dirty_object_count;
      const std::string& descriptor = class_data.descriptor;
      os_ << "    " << mirror::Class::PrettyClass(klass) << " ("
          << "objects: " << dirty_object_count << ", "
          << "avg dirty bytes: " << avg_dirty_bytes_per_class << ", "
          << "avg object size: " << avg_object_size << ", "
          << "class descriptor: '" << descriptor << "'"
          << ")\n";
      if (strcmp(descriptor.c_str(), "Ljava/lang/Class;") == 0) {
        DumpSamplesAndOffsetCount();
        os_ << "      field contents:\n";
        for (mirror::Object* object : class_data.dirty_objects) {
          // remote class object
          auto remote_klass = reinterpret_cast<mirror::Class*>(object);
          // local class object
          auto local_klass =
              RemoteContentsPointerToLocal(remote_klass,
                                           *RegionCommon<mirror::Object>::remote_contents_,
                                           RegionCommon<mirror::Object>::image_header_);
          os_ << "        " << reinterpret_cast<const void*>(object) << " ";
          os_ << "  class_status (remote): " << remote_klass->GetStatus() << ", ";
          os_ << "  class_status (local): " << local_klass->GetStatus();
          os_ << "\n";
        }
      }
    }
  }

  // Dumps per-class statistics for clean objects that sit on dirty pages.
  void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto false_dirty_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.false_dirty_object_count; });
    os_ << "\n" << "  False-dirty object count by class:\n";
    for (const auto& vk_pair : false_dirty_object_class_values) {
      size_t object_count = vk_pair.first;
      mirror::Class* klass = vk_pair.second;
      ClassData& class_data = class_data_[klass];
      size_t object_sizes = class_data.false_dirty_byte_count;
      float avg_object_size = object_sizes * 1.0f / object_count;
      const std::string& descriptor = class_data.descriptor;
      os_ << "    " << mirror::Class::PrettyClass(klass) << " ("
          << "objects: " << object_count << ", "
          << "avg object size: " << avg_object_size << ", "
          << "total bytes: " << object_sizes << ", "
          << "class descriptor: '" << descriptor << "'"
          << ")\n";
    }
  }

  // Dumps per-class counts of completely clean objects.
  void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto clean_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.clean_object_count; });
    os_ << "\n" << "  Clean object count by class:\n";
    for (const auto& vk_pair : clean_object_class_values) {
      os_ << "    " << mirror::Class::PrettyClass(vk_pair.second) << " (" << vk_pair.first << ")\n";
    }
  }

 private:
  // Aggregate and detail class data from an image diff.
  struct ClassData {
    size_t dirty_object_count = 0;
    // Track only the byte-per-byte dirtiness (in bytes)
    size_t dirty_object_byte_count = 0;
    // Track the object-by-object dirtiness (in bytes)
    size_t dirty_object_size_in_bytes = 0;
    size_t clean_object_count = 0;
    std::string descriptor;
    size_t false_dirty_byte_count = 0;
    size_t false_dirty_object_count = 0;
    std::vector<mirror::Object*> false_dirty_objects;
    // Remote pointers to dirty objects
    std::vector<mirror::Object*> dirty_objects;

    void AddCleanObject() REQUIRES_SHARED(Locks::mutator_lock_) {
      ++clean_object_count;
    }

    // Note: stores the REMOTE pointer in dirty_objects (see member comment).
    void AddDirtyObject(mirror::Object* object, mirror::Object* object_remote)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      ++dirty_object_count;
      dirty_object_byte_count += CountDirtyBytes(object, object_remote);
      dirty_object_size_in_bytes += EntrySize(object);
      dirty_objects.push_back(object_remote);
    }

    void AddFalseDirtyObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
      ++false_dirty_object_count;
      false_dirty_objects.push_back(object);
      false_dirty_byte_count += EntrySize(object);
    }

   private:
    // Go byte-by-byte and figure out what exactly got dirtied
    static size_t CountDirtyBytes(mirror::Object* object1, mirror::Object* object2)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      const uint8_t* cur1 = reinterpret_cast<const uint8_t*>(object1);
      const uint8_t* cur2 = reinterpret_cast<const uint8_t*>(object2);
      size_t dirty_bytes = 0;
      size_t object_size = EntrySize(object1);
      for (size_t i = 0; i < object_size; ++i) {
        if (cur1[i] != cur2[i]) {
          dirty_bytes++;
        }
      }
      return dirty_bytes;
    }
  };

  std::ostream& os_;
  // Whether DiffEntryContents should record private-dirty objects for dumping.
  bool dump_dirty_objects_;
  // Objects (local pointers) with dirty static fields, recorded for DumpDirtyObjects().
  std::unordered_set<mirror::Object*> dirty_objects_;
  // Per-class aggregates, keyed by local class pointer.
  std::map<mirror::Class*, ClassData> class_data_;

  DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
};
617
618// Region analysis for ArtMethods.
619// TODO: most of these need work.
620template<>
621class RegionSpecializedBase<ArtMethod> : RegionCommon<ArtMethod> {
622 public:
623 RegionSpecializedBase(std::ostream* os,
624 std::vector<uint8_t>* remote_contents,
625 std::vector<uint8_t>* zygote_contents,
626 const backtrace_map_t& boot_map,
627 const ImageHeader& image_header) :
628 RegionCommon<ArtMethod>(os, remote_contents, zygote_contents, boot_map, image_header),
629 os_(*os) { }
630
631 void CheckEntrySanity(const uint8_t* current ATTRIBUTE_UNUSED) const
632 REQUIRES_SHARED(Locks::mutator_lock_) {
633 }
634
635 ArtMethod* GetNextEntry(ArtMethod* entry)
636 REQUIRES_SHARED(Locks::mutator_lock_) {
637 uint8_t* next = reinterpret_cast<uint8_t*>(entry) + RoundUp(EntrySize(entry), kObjectAlignment);
638 return reinterpret_cast<ArtMethod*>(next);
639 }
640
641 void VisitEntry(ArtMethod* method ATTRIBUTE_UNUSED)
642 REQUIRES_SHARED(Locks::mutator_lock_) {
643 }
644
645 void AddFalseDirtyEntry(ArtMethod* method)
646 REQUIRES_SHARED(Locks::mutator_lock_) {
647 RegionCommon<ArtMethod>::AddFalseDirtyEntry(method);
648 }
649
650 void AddCleanEntry(ArtMethod* method ATTRIBUTE_UNUSED) {
651 }
652
653 void AddDirtyEntry(ArtMethod* method, ArtMethod* method_remote)
654 REQUIRES_SHARED(Locks::mutator_lock_) {
655 size_t entry_size = EntrySize(method);
656 ++different_entries_;
657 dirty_entry_bytes_ += entry_size;
658 // Increment counts for the fields that are dirty
659 const uint8_t* current = reinterpret_cast<const uint8_t*>(method);
660 const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(method_remote);
661 // ArtMethods always log their dirty count and entries.
662 for (size_t i = 0; i < entry_size; ++i) {
663 if (current[i] != current_remote[i]) {
664 field_dirty_count_[i]++;
665 }
666 }
667 dirty_entries_.push_back(method);
668 }
669
670 void DiffEntryContents(ArtMethod* method ATTRIBUTE_UNUSED,
Mathieu Chartier51e79652017-07-24 15:43:38 -0700671 uint8_t* remote_bytes ATTRIBUTE_UNUSED,
672 const uint8_t* base_ptr ATTRIBUTE_UNUSED)
David Sehrb4005f02017-06-20 19:11:40 -0700673 REQUIRES_SHARED(Locks::mutator_lock_) {
674 }
675
676 void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
677 DumpSamplesAndOffsetCount();
678 os_ << " field contents:\n";
679 for (ArtMethod* method : dirty_entries_) {
680 // remote method
681 auto art_method = reinterpret_cast<ArtMethod*>(method);
682 // remote class
683 mirror::Class* remote_declaring_class =
684 FixUpRemotePointer(art_method->GetDeclaringClass(),
685 *RegionCommon<ArtMethod>::remote_contents_,
686 RegionCommon<ArtMethod>::boot_map_);
687 // local class
688 mirror::Class* declaring_class =
689 RemoteContentsPointerToLocal(remote_declaring_class,
690 *RegionCommon<ArtMethod>::remote_contents_,
691 RegionCommon<ArtMethod>::image_header_);
692 DumpOneArtMethod(art_method, declaring_class, remote_declaring_class);
693 }
694 }
695
696 void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
697 os_ << " field contents:\n";
698 for (ArtMethod* method : false_dirty_entries_) {
699 // local class
700 mirror::Class* declaring_class = method->GetDeclaringClass();
701 DumpOneArtMethod(method, declaring_class, nullptr);
702 }
703 }
704
705 void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
706 }
707
708 private:
709 std::ostream& os_;
710
711 void DumpOneArtMethod(ArtMethod* art_method,
712 mirror::Class* declaring_class,
713 mirror::Class* remote_declaring_class)
714 REQUIRES_SHARED(Locks::mutator_lock_) {
715 PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
716 os_ << " " << reinterpret_cast<const void*>(art_method) << " ";
717 os_ << " entryPointFromJni: "
718 << reinterpret_cast<const void*>(art_method->GetDataPtrSize(pointer_size)) << ", ";
719 os_ << " entryPointFromQuickCompiledCode: "
720 << reinterpret_cast<const void*>(
721 art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size))
722 << ", ";
723 os_ << " isNative? " << (art_method->IsNative() ? "yes" : "no") << ", ";
724 os_ << " class_status (local): " << declaring_class->GetStatus();
725 if (remote_declaring_class != nullptr) {
726 os_ << ", class_status (remote): " << remote_declaring_class->GetStatus();
727 }
728 os_ << "\n";
729 }
730
731 DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
732};
733
734template <typename T>
735class RegionData : public RegionSpecializedBase<T> {
736 public:
  // Forwards all state to the specialized base. dump_dirty_objects only has
  // an effect for the mirror::Object specialization.
  RegionData(std::ostream* os,
             std::vector<uint8_t>* remote_contents,
             std::vector<uint8_t>* zygote_contents,
             const backtrace_map_t& boot_map,
             const ImageHeader& image_header,
             bool dump_dirty_objects)
      : RegionSpecializedBase<T>(os,
                                 remote_contents,
                                 zygote_contents,
                                 boot_map,
                                 image_header,
                                 dump_dirty_objects),
        os_(*os) {
    // Both buffers must be provided (they may be empty, but never null).
    CHECK(remote_contents != nullptr);
    CHECK(zygote_contents != nullptr);
  }
753
  // Walk over the type T entries in the region between begin_image_ptr and end_image_ptr,
  // collecting and reporting data regarding dirty, difference, etc.
  void ProcessRegion(const MappingData& mapping_data,
                     RemoteProcesses remotes,
                     const uint8_t* begin_image_ptr,
                     const uint8_t* end_image_ptr)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Entries start immediately after the (object-aligned) ImageHeader.
    const uint8_t* current = begin_image_ptr + RoundUp(sizeof(ImageHeader), kObjectAlignment);
    T* entry = reinterpret_cast<T*>(const_cast<uint8_t*>(current));
    while (reinterpret_cast<uintptr_t>(entry) < reinterpret_cast<uintptr_t>(end_image_ptr)) {
      ComputeEntryDirty(entry, begin_image_ptr, mapping_data.dirty_page_set);

      entry = RegionSpecializedBase<T>::GetNextEntry(entry);
    }

    // Looking at only dirty pages, figure out how many of those bytes belong to dirty entries.
    // TODO: fix this now that there are multiple regions in a mapping.
    float true_dirtied_percent =
        RegionCommon<T>::GetDirtyEntryBytes() * 1.0f / (mapping_data.dirty_pages * kPageSize);

    // Entry specific statistics.
    os_ << RegionCommon<T>::GetDifferentEntryCount() << " different entries, \n  "
        << RegionCommon<T>::GetDirtyEntryBytes() << " different entry [bytes], \n  "
        << RegionCommon<T>::GetFalseDirtyEntryCount() << " false dirty entries,\n  "
        << RegionCommon<T>::GetFalseDirtyEntryBytes() << " false dirty entry [bytes], \n  "
        << true_dirtied_percent << " different entries-vs-total in a dirty page;\n  "
        << "\n";

    // Choose the baseline to diff dirty entries against: the local image by
    // default, or the zygote's copy when dumping application private-dirty.
    const uint8_t* base_ptr = begin_image_ptr;
    switch (remotes) {
      case RemoteProcesses::kZygoteOnly:
        os_ << "  Zygote shared dirty entries: ";
        break;
      case RemoteProcesses::kImageAndZygote:
        os_ << "  Application dirty entries (private dirty): ";
        // If we are dumping private dirty, diff against the zygote map to make it clearer what
        // fields caused the page to be private dirty.
        base_ptr = &RegionCommon<T>::zygote_contents_->operator[](0);
        break;
      case RemoteProcesses::kImageOnly:
        os_ << "  Application dirty entries (unknown whether private or shared dirty): ";
        break;
    }
    DiffDirtyEntries(ProcessType::kRemote,
                     begin_image_ptr,
                     RegionCommon<T>::remote_contents_,
                     base_ptr,
                     /*log_dirty_objects*/true);
    // Print shared dirty after since it's less important.
    if (RegionCommon<T>::GetZygoteDirtyEntryCount() != 0) {
      // We only reach this point if both pids were specified. Furthermore,
      // entries are only displayed here if they differed in both the image
      // and the zygote, so they are probably private dirty.
      CHECK(remotes == RemoteProcesses::kImageAndZygote);
      os_ << "\n" << "  Zygote dirty entries (probably shared dirty): ";
      DiffDirtyEntries(ProcessType::kZygote,
                       begin_image_ptr,
                       RegionCommon<T>::zygote_contents_,
                       begin_image_ptr,
                       /*log_dirty_objects*/false);
    }
    RegionSpecializedBase<T>::DumpDirtyObjects();
    RegionSpecializedBase<T>::DumpDirtyEntries();
    RegionSpecializedBase<T>::DumpFalseDirtyEntries();
    RegionSpecializedBase<T>::DumpCleanEntries();
  }
820
821 private:
822 std::ostream& os_;
823
824 void DiffDirtyEntries(ProcessType process_type,
825 const uint8_t* begin_image_ptr,
Mathieu Chartier51e79652017-07-24 15:43:38 -0700826 std::vector<uint8_t>* contents,
Jeff Haoc23b0c02017-07-27 18:19:38 -0700827 const uint8_t* base_ptr,
828 bool log_dirty_objects)
David Sehrb4005f02017-06-20 19:11:40 -0700829 REQUIRES_SHARED(Locks::mutator_lock_) {
830 os_ << RegionCommon<T>::dirty_entries_.size() << "\n";
831 const std::set<T*>& entries =
832 (process_type == ProcessType::kZygote) ?
833 RegionCommon<T>::zygote_dirty_entries_:
834 RegionCommon<T>::image_dirty_entries_;
835 for (T* entry : entries) {
836 uint8_t* entry_bytes = reinterpret_cast<uint8_t*>(entry);
837 ptrdiff_t offset = entry_bytes - begin_image_ptr;
838 uint8_t* remote_bytes = &(*contents)[offset];
Jeff Haoc23b0c02017-07-27 18:19:38 -0700839 RegionSpecializedBase<T>::DiffEntryContents(entry,
840 remote_bytes,
841 &base_ptr[offset],
842 log_dirty_objects);
David Sehrb4005f02017-06-20 19:11:40 -0700843 }
844 }
845
846 void ComputeEntryDirty(T* entry,
847 const uint8_t* begin_image_ptr,
848 const std::set<size_t>& dirty_pages)
849 REQUIRES_SHARED(Locks::mutator_lock_) {
850 // Set up pointers in the remote and the zygote for comparison.
851 uint8_t* current = reinterpret_cast<uint8_t*>(entry);
852 ptrdiff_t offset = current - begin_image_ptr;
853 T* entry_remote =
854 reinterpret_cast<T*>(const_cast<uint8_t*>(&(*RegionCommon<T>::remote_contents_)[offset]));
Mathieu Chartier51e79652017-07-24 15:43:38 -0700855 const bool have_zygote = !RegionCommon<T>::zygote_contents_->empty();
David Sehrb4005f02017-06-20 19:11:40 -0700856 const uint8_t* current_zygote =
Mathieu Chartier51e79652017-07-24 15:43:38 -0700857 have_zygote ? &(*RegionCommon<T>::zygote_contents_)[offset] : nullptr;
David Sehrb4005f02017-06-20 19:11:40 -0700858 T* entry_zygote = reinterpret_cast<T*>(const_cast<uint8_t*>(current_zygote));
859 // Visit and classify entries at the current location.
860 RegionSpecializedBase<T>::VisitEntry(entry);
Mathieu Chartier51e79652017-07-24 15:43:38 -0700861
862 // Test private dirty first.
863 bool is_dirty = false;
864 if (have_zygote) {
865 bool private_dirty = EntriesDiffer(entry_zygote, entry_remote);
866 if (private_dirty) {
867 // Private dirty, app vs zygote.
868 is_dirty = true;
David Sehrb4005f02017-06-20 19:11:40 -0700869 RegionCommon<T>::AddImageDirtyEntry(entry);
David Sehrb4005f02017-06-20 19:11:40 -0700870 }
Mathieu Chartier51e79652017-07-24 15:43:38 -0700871 if (EntriesDiffer(entry_zygote, entry)) {
872 // Shared dirty, zygote vs image.
873 is_dirty = true;
874 RegionCommon<T>::AddZygoteDirtyEntry(entry);
875 }
876 } else if (EntriesDiffer(entry_remote, entry)) {
877 // Shared or private dirty, app vs image.
878 is_dirty = true;
879 RegionCommon<T>::AddImageDirtyEntry(entry);
880 }
881 if (is_dirty) {
882 // TODO: Add support dirty entries in zygote and image.
883 RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
David Sehrb4005f02017-06-20 19:11:40 -0700884 } else {
885 RegionSpecializedBase<T>::AddCleanEntry(entry);
Mathieu Chartier51e79652017-07-24 15:43:38 -0700886 if (RegionCommon<T>::IsEntryOnDirtyPage(entry, dirty_pages)) {
887 // This entry was either never mutated or got mutated back to the same value.
888 // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
889 RegionSpecializedBase<T>::AddFalseDirtyEntry(entry);
890 }
David Sehrb4005f02017-06-20 19:11:40 -0700891 }
892 }
893
894 DISALLOW_COPY_AND_ASSIGN(RegionData);
895};
896
897} // namespace
898
899
Igor Murashkin37743352014-11-13 14:38:00 -0800900class ImgDiagDumper {
901 public:
902 explicit ImgDiagDumper(std::ostream* os,
Mathieu Chartiercb044bc2016-04-01 13:56:41 -0700903 const ImageHeader& image_header,
904 const std::string& image_location,
Mathieu Chartierc5196cd2016-04-08 14:08:37 -0700905 pid_t image_diff_pid,
Jeff Haoc23b0c02017-07-27 18:19:38 -0700906 pid_t zygote_diff_pid,
907 bool dump_dirty_objects)
Igor Murashkin37743352014-11-13 14:38:00 -0800908 : os_(os),
909 image_header_(image_header),
910 image_location_(image_location),
Mathieu Chartierc5196cd2016-04-08 14:08:37 -0700911 image_diff_pid_(image_diff_pid),
David Sehr20e271a2017-06-14 13:02:14 -0700912 zygote_diff_pid_(zygote_diff_pid),
Jeff Haoc23b0c02017-07-27 18:19:38 -0700913 dump_dirty_objects_(dump_dirty_objects),
David Sehr20e271a2017-06-14 13:02:14 -0700914 zygote_pid_only_(false) {}
Igor Murashkin37743352014-11-13 14:38:00 -0800915
David Sehr50005a02017-06-21 13:24:21 -0700916 bool Init() {
Igor Murashkin37743352014-11-13 14:38:00 -0800917 std::ostream& os = *os_;
Mathieu Chartiercb044bc2016-04-01 13:56:41 -0700918
David Sehr50005a02017-06-21 13:24:21 -0700919 if (image_diff_pid_ < 0 && zygote_diff_pid_ < 0) {
920 os << "Either --image-diff-pid or --zygote-diff-pid (or both) must be specified.\n";
921 return false;
Igor Murashkin37743352014-11-13 14:38:00 -0800922 }
923
David Sehr50005a02017-06-21 13:24:21 -0700924 // To avoid the combinations of command-line argument use cases:
925 // If the user invoked with only --zygote-diff-pid, shuffle that to
926 // image_diff_pid_, invalidate zygote_diff_pid_, and remember that
927 // image_diff_pid_ is now special.
928 if (image_diff_pid_ < 0) {
929 image_diff_pid_ = zygote_diff_pid_;
930 zygote_diff_pid_ = -1;
931 zygote_pid_only_ = true;
David Sehr45de57f2017-06-21 05:03:22 +0000932 }
Igor Murashkin37743352014-11-13 14:38:00 -0800933
David Sehr45de57f2017-06-21 05:03:22 +0000934 {
935 struct stat sts;
936 std::string proc_pid_str =
937 StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid_)); // NOLINT [runtime/int]
938 if (stat(proc_pid_str.c_str(), &sts) == -1) {
939 os << "Process does not exist";
940 return false;
Igor Murashkin37743352014-11-13 14:38:00 -0800941 }
942 }
943
David Sehr45de57f2017-06-21 05:03:22 +0000944 // Open /proc/$pid/maps to view memory maps
David Sehr50005a02017-06-21 13:24:21 -0700945 auto tmp_proc_maps = std::unique_ptr<BacktraceMap>(BacktraceMap::Create(image_diff_pid_));
946 if (tmp_proc_maps == nullptr) {
David Sehr45de57f2017-06-21 05:03:22 +0000947 os << "Could not read backtrace maps";
948 return false;
949 }
Igor Murashkin37743352014-11-13 14:38:00 -0800950
David Sehr45de57f2017-06-21 05:03:22 +0000951 bool found_boot_map = false;
David Sehr45de57f2017-06-21 05:03:22 +0000952 // Find the memory map only for boot.art
David Sehr50005a02017-06-21 13:24:21 -0700953 for (const backtrace_map_t& map : *tmp_proc_maps) {
David Sehr45de57f2017-06-21 05:03:22 +0000954 if (EndsWith(map.name, GetImageLocationBaseName())) {
955 if ((map.flags & PROT_WRITE) != 0) {
David Sehr50005a02017-06-21 13:24:21 -0700956 boot_map_ = map;
David Sehr45de57f2017-06-21 05:03:22 +0000957 found_boot_map = true;
958 break;
David Sehr0627be32017-06-16 13:50:02 -0700959 }
David Sehr45de57f2017-06-21 05:03:22 +0000960 // In actuality there's more than 1 map, but the second one is read-only.
961 // The one we care about is the write-able map.
962 // The readonly maps are guaranteed to be identical, so its not interesting to compare
963 // them.
David Sehr0627be32017-06-16 13:50:02 -0700964 }
965 }
David Sehr0627be32017-06-16 13:50:02 -0700966
David Sehr45de57f2017-06-21 05:03:22 +0000967 if (!found_boot_map) {
968 os << "Could not find map for " << GetImageLocationBaseName();
969 return false;
970 }
David Sehr50005a02017-06-21 13:24:21 -0700971 // Sanity check boot_map_.
972 CHECK(boot_map_.end >= boot_map_.start);
973 boot_map_size_ = boot_map_.end - boot_map_.start;
David Sehr0627be32017-06-16 13:50:02 -0700974
David Sehr50005a02017-06-21 13:24:21 -0700975 // Open /proc/<image_diff_pid_>/mem and read as remote_contents_.
976 std::string image_file_name =
977 StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid_)); // NOLINT [runtime/int]
978 auto image_map_file = std::unique_ptr<File>(OS::OpenFileForReading(image_file_name.c_str()));
979 if (image_map_file == nullptr) {
980 os << "Failed to open " << image_file_name << " for reading";
981 return false;
982 }
983 std::vector<uint8_t> tmp_remote_contents(boot_map_size_);
984 if (!image_map_file->PreadFully(&tmp_remote_contents[0], boot_map_size_, boot_map_.start)) {
985 os << "Could not fully read file " << image_file_name;
986 return false;
987 }
988
989 // If zygote_diff_pid_ != -1, open /proc/<zygote_diff_pid_>/mem and read as zygote_contents_.
990 std::vector<uint8_t> tmp_zygote_contents;
991 if (zygote_diff_pid_ != -1) {
992 std::string zygote_file_name =
993 StringPrintf("/proc/%ld/mem", static_cast<long>(zygote_diff_pid_)); // NOLINT [runtime/int]
994 std::unique_ptr<File> zygote_map_file(OS::OpenFileForReading(zygote_file_name.c_str()));
995 if (zygote_map_file == nullptr) {
996 os << "Failed to open " << zygote_file_name << " for reading";
997 return false;
998 }
999 // The boot map should be at the same address.
Mathieu Chartier51e79652017-07-24 15:43:38 -07001000 tmp_zygote_contents.resize(boot_map_size_);
David Sehr50005a02017-06-21 13:24:21 -07001001 if (!zygote_map_file->PreadFully(&tmp_zygote_contents[0], boot_map_size_, boot_map_.start)) {
1002 LOG(WARNING) << "Could not fully read zygote file " << zygote_file_name;
1003 return false;
1004 }
1005 }
1006
1007 // Open /proc/<image_diff_pid_>/pagemap.
1008 std::string pagemap_file_name = StringPrintf(
1009 "/proc/%ld/pagemap", static_cast<long>(image_diff_pid_)); // NOLINT [runtime/int]
1010 auto tmp_pagemap_file =
1011 std::unique_ptr<File>(OS::OpenFileForReading(pagemap_file_name.c_str()));
1012 if (tmp_pagemap_file == nullptr) {
1013 os << "Failed to open " << pagemap_file_name << " for reading: " << strerror(errno);
1014 return false;
1015 }
1016
1017 // Not truly clean, mmap-ing boot.art again would be more pristine, but close enough
1018 const char* clean_pagemap_file_name = "/proc/self/pagemap";
1019 auto tmp_clean_pagemap_file = std::unique_ptr<File>(
1020 OS::OpenFileForReading(clean_pagemap_file_name));
1021 if (tmp_clean_pagemap_file == nullptr) {
1022 os << "Failed to open " << clean_pagemap_file_name << " for reading: " << strerror(errno);
1023 return false;
1024 }
1025
1026 auto tmp_kpageflags_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpageflags"));
1027 if (tmp_kpageflags_file == nullptr) {
1028 os << "Failed to open /proc/kpageflags for reading: " << strerror(errno);
1029 return false;
1030 }
1031
1032 auto tmp_kpagecount_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpagecount"));
1033 if (tmp_kpagecount_file == nullptr) {
1034 os << "Failed to open /proc/kpagecount for reading:" << strerror(errno);
1035 return false;
1036 }
1037
David Sehrb4005f02017-06-20 19:11:40 -07001038 // Commit the mappings, etc.
David Sehr50005a02017-06-21 13:24:21 -07001039 proc_maps_ = std::move(tmp_proc_maps);
1040 remote_contents_ = std::move(tmp_remote_contents);
1041 zygote_contents_ = std::move(tmp_zygote_contents);
1042 pagemap_file_ = std::move(*tmp_pagemap_file.release());
1043 clean_pagemap_file_ = std::move(*tmp_clean_pagemap_file.release());
1044 kpageflags_file_ = std::move(*tmp_kpageflags_file.release());
1045 kpagecount_file_ = std::move(*tmp_kpagecount_file.release());
1046
1047 return true;
1048 }
1049
1050 bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
1051 std::ostream& os = *os_;
1052 os << "IMAGE LOCATION: " << image_location_ << "\n\n";
1053
1054 os << "MAGIC: " << image_header_.GetMagic() << "\n\n";
1055
1056 os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n";
1057
1058 PrintPidLine("IMAGE", image_diff_pid_);
1059 os << "\n\n";
1060 PrintPidLine("ZYGOTE", zygote_diff_pid_);
1061 bool ret = true;
1062 if (image_diff_pid_ >= 0 || zygote_diff_pid_ >= 0) {
1063 ret = DumpImageDiff();
1064 os << "\n\n";
1065 }
1066
1067 os << std::flush;
1068
1069 return ret;
1070 }
1071
1072 private:
1073 bool DumpImageDiff()
1074 REQUIRES_SHARED(Locks::mutator_lock_) {
1075 return DumpImageDiffMap();
1076 }
1077
David Sehrb4005f02017-06-20 19:11:40 -07001078 bool ComputeDirtyBytes(const uint8_t* image_begin, MappingData* mapping_data /*out*/) {
David Sehr50005a02017-06-21 13:24:21 -07001079 std::ostream& os = *os_;
1080
1081 size_t virtual_page_idx = 0; // Virtual page number (for an absolute memory address)
1082 size_t page_idx = 0; // Page index relative to 0
1083 size_t previous_page_idx = 0; // Previous page index relative to 0
1084
1085
1086 // Iterate through one page at a time. Boot map begin/end already implicitly aligned.
1087 for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; begin += kPageSize) {
1088 ptrdiff_t offset = begin - boot_map_.start;
1089
1090 // We treat the image header as part of the memory map for now
1091 // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
1092 // But it might still be interesting to see if any of the ImageHeader data mutated
1093 const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
1094 uint8_t* remote_ptr = &remote_contents_[offset];
1095
1096 if (memcmp(local_ptr, remote_ptr, kPageSize) != 0) {
David Sehrb4005f02017-06-20 19:11:40 -07001097 mapping_data->different_pages++;
David Sehr50005a02017-06-21 13:24:21 -07001098
1099 // Count the number of 32-bit integers that are different.
1100 for (size_t i = 0; i < kPageSize / sizeof(uint32_t); ++i) {
1101 uint32_t* remote_ptr_int32 = reinterpret_cast<uint32_t*>(remote_ptr);
1102 const uint32_t* local_ptr_int32 = reinterpret_cast<const uint32_t*>(local_ptr);
1103
1104 if (remote_ptr_int32[i] != local_ptr_int32[i]) {
David Sehrb4005f02017-06-20 19:11:40 -07001105 mapping_data->different_int32s++;
David Sehr50005a02017-06-21 13:24:21 -07001106 }
1107 }
1108 }
1109 }
1110
Mathieu Chartier728f8502017-07-28 17:35:30 -07001111 std::vector<size_t> private_dirty_pages_for_section(ImageHeader::kSectionCount, 0u);
1112
David Sehr50005a02017-06-21 13:24:21 -07001113 // Iterate through one byte at a time.
1114 ptrdiff_t page_off_begin = image_header_.GetImageBegin() - image_begin;
1115 for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; ++begin) {
1116 previous_page_idx = page_idx;
1117 ptrdiff_t offset = begin - boot_map_.start;
1118
1119 // We treat the image header as part of the memory map for now
1120 // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
1121 // But it might still be interesting to see if any of the ImageHeader data mutated
1122 const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
1123 uint8_t* remote_ptr = &remote_contents_[offset];
1124
1125 virtual_page_idx = reinterpret_cast<uintptr_t>(local_ptr) / kPageSize;
1126
1127 // Calculate the page index, relative to the 0th page where the image begins
1128 page_idx = (offset + page_off_begin) / kPageSize;
1129 if (*local_ptr != *remote_ptr) {
1130 // Track number of bytes that are different
David Sehrb4005f02017-06-20 19:11:40 -07001131 mapping_data->different_bytes++;
David Sehr50005a02017-06-21 13:24:21 -07001132 }
1133
1134 // Independently count the # of dirty pages on the remote side
1135 size_t remote_virtual_page_idx = begin / kPageSize;
1136 if (previous_page_idx != page_idx) {
1137 uint64_t page_count = 0xC0FFEE;
1138 // TODO: virtual_page_idx needs to be from the same process
1139 std::string error_msg;
1140 int dirtiness = (IsPageDirty(&pagemap_file_, // Image-diff-pid procmap
1141 &clean_pagemap_file_, // Self procmap
1142 &kpageflags_file_,
1143 &kpagecount_file_,
1144 remote_virtual_page_idx, // potentially "dirty" page
1145 virtual_page_idx, // true "clean" page
1146 &page_count,
1147 &error_msg));
1148 if (dirtiness < 0) {
1149 os << error_msg;
1150 return false;
1151 } else if (dirtiness > 0) {
David Sehrb4005f02017-06-20 19:11:40 -07001152 mapping_data->dirty_pages++;
1153 mapping_data->dirty_page_set.insert(mapping_data->dirty_page_set.end(), virtual_page_idx);
David Sehr50005a02017-06-21 13:24:21 -07001154 }
1155
1156 bool is_dirty = dirtiness > 0;
1157 bool is_private = page_count == 1;
1158
1159 if (page_count == 1) {
David Sehrb4005f02017-06-20 19:11:40 -07001160 mapping_data->private_pages++;
David Sehr50005a02017-06-21 13:24:21 -07001161 }
1162
1163 if (is_dirty && is_private) {
David Sehrb4005f02017-06-20 19:11:40 -07001164 mapping_data->private_dirty_pages++;
Mathieu Chartier728f8502017-07-28 17:35:30 -07001165 for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
1166 const ImageHeader::ImageSections section = static_cast<ImageHeader::ImageSections>(i);
1167 if (image_header_.GetImageSection(section).Contains(offset)) {
1168 ++private_dirty_pages_for_section[i];
1169 }
1170 }
David Sehr50005a02017-06-21 13:24:21 -07001171 }
1172 }
1173 }
David Sehrb4005f02017-06-20 19:11:40 -07001174 mapping_data->false_dirty_pages = mapping_data->dirty_pages - mapping_data->different_pages;
1175 // Print low-level (bytes, int32s, pages) statistics.
1176 os << mapping_data->different_bytes << " differing bytes,\n "
1177 << mapping_data->different_int32s << " differing int32s,\n "
1178 << mapping_data->different_pages << " differing pages,\n "
1179 << mapping_data->dirty_pages << " pages are dirty;\n "
1180 << mapping_data->false_dirty_pages << " pages are false dirty;\n "
1181 << mapping_data->private_pages << " pages are private;\n "
Mathieu Chartier728f8502017-07-28 17:35:30 -07001182 << mapping_data->private_dirty_pages << " pages are Private_Dirty\n "
1183 << "\n";
1184
1185 size_t total_private_dirty_pages = std::accumulate(private_dirty_pages_for_section.begin(),
1186 private_dirty_pages_for_section.end(),
1187 0u);
1188 os << "Image sections (total private dirty pages " << total_private_dirty_pages << ")\n";
1189 for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
1190 const ImageHeader::ImageSections section = static_cast<ImageHeader::ImageSections>(i);
1191 os << section << " " << image_header_.GetImageSection(section)
1192 << " private dirty pages=" << private_dirty_pages_for_section[i] << "\n";
1193 }
1194 os << "\n";
David Sehrb4005f02017-06-20 19:11:40 -07001195
David Sehr50005a02017-06-21 13:24:21 -07001196 return true;
1197 }
1198
David Sehr50005a02017-06-21 13:24:21 -07001199 // Look at /proc/$pid/mem and only diff the things from there
1200 bool DumpImageDiffMap()
David Sehrb4005f02017-06-20 19:11:40 -07001201 REQUIRES_SHARED(Locks::mutator_lock_) {
David Sehr50005a02017-06-21 13:24:21 -07001202 std::ostream& os = *os_;
Igor Murashkin37743352014-11-13 14:38:00 -08001203 std::string error_msg;
1204
1205 // Walk the bytes and diff against our boot image
Igor Murashkin37743352014-11-13 14:38:00 -08001206 os << "\nObserving boot image header at address "
David Sehr50005a02017-06-21 13:24:21 -07001207 << reinterpret_cast<const void*>(&image_header_)
Igor Murashkin37743352014-11-13 14:38:00 -08001208 << "\n\n";
1209
David Sehr50005a02017-06-21 13:24:21 -07001210 const uint8_t* image_begin_unaligned = image_header_.GetImageBegin();
Mathieu Chartierc7853442015-03-27 14:35:38 -07001211 const uint8_t* image_mirror_end_unaligned = image_begin_unaligned +
David Sehr50005a02017-06-21 13:24:21 -07001212 image_header_.GetImageSection(ImageHeader::kSectionObjects).Size();
1213 const uint8_t* image_end_unaligned = image_begin_unaligned + image_header_.GetImageSize();
Igor Murashkin37743352014-11-13 14:38:00 -08001214
1215 // Adjust range to nearest page
1216 const uint8_t* image_begin = AlignDown(image_begin_unaligned, kPageSize);
1217 const uint8_t* image_end = AlignUp(image_end_unaligned, kPageSize);
1218
David Sehr50005a02017-06-21 13:24:21 -07001219 if (reinterpret_cast<uintptr_t>(image_begin) > boot_map_.start ||
1220 reinterpret_cast<uintptr_t>(image_end) < boot_map_.end) {
Igor Murashkin37743352014-11-13 14:38:00 -08001221 // Sanity check that we aren't trying to read a completely different boot image
1222 os << "Remote boot map is out of range of local boot map: " <<
1223 "local begin " << reinterpret_cast<const void*>(image_begin) <<
1224 ", local end " << reinterpret_cast<const void*>(image_end) <<
David Sehr50005a02017-06-21 13:24:21 -07001225 ", remote begin " << reinterpret_cast<const void*>(boot_map_.start) <<
1226 ", remote end " << reinterpret_cast<const void*>(boot_map_.end);
Igor Murashkin37743352014-11-13 14:38:00 -08001227 return false;
1228 // If we wanted even more validation we could map the ImageHeader from the file
1229 }
1230
David Sehrb4005f02017-06-20 19:11:40 -07001231 MappingData mapping_data;
David Sehr45de57f2017-06-21 05:03:22 +00001232
David Sehrb4005f02017-06-20 19:11:40 -07001233 os << "Mapping at [" << reinterpret_cast<void*>(boot_map_.start) << ", "
1234 << reinterpret_cast<void*>(boot_map_.end) << ") had:\n ";
1235 if (!ComputeDirtyBytes(image_begin, &mapping_data)) {
David Sehr50005a02017-06-21 13:24:21 -07001236 return false;
Igor Murashkin37743352014-11-13 14:38:00 -08001237 }
1238
David Sehrb4005f02017-06-20 19:11:40 -07001239 RegionData<mirror::Object> object_region_data(os_,
1240 &remote_contents_,
1241 &zygote_contents_,
1242 boot_map_,
Jeff Haoc23b0c02017-07-27 18:19:38 -07001243 image_header_,
1244 dump_dirty_objects_);
Andreas Gampe7ad71d02016-04-04 13:49:18 -07001245
David Sehrb4005f02017-06-20 19:11:40 -07001246 RemoteProcesses remotes;
David Sehr20e271a2017-06-14 13:02:14 -07001247 if (zygote_pid_only_) {
David Sehrb4005f02017-06-20 19:11:40 -07001248 remotes = RemoteProcesses::kZygoteOnly;
1249 } else if (zygote_diff_pid_ > 0) {
1250 remotes = RemoteProcesses::kImageAndZygote;
David Sehr20e271a2017-06-14 13:02:14 -07001251 } else {
David Sehrb4005f02017-06-20 19:11:40 -07001252 remotes = RemoteProcesses::kImageOnly;
Mathieu Chartiercb044bc2016-04-01 13:56:41 -07001253 }
1254
David Sehrb4005f02017-06-20 19:11:40 -07001255 object_region_data.ProcessRegion(mapping_data,
1256 remotes,
1257 image_begin_unaligned,
1258 image_mirror_end_unaligned);
Igor Murashkin37743352014-11-13 14:38:00 -08001259
1260 return true;
1261 }
1262
Igor Murashkin37743352014-11-13 14:38:00 -08001263 static bool GetPageFrameNumber(File* page_map_file,
1264 size_t virtual_page_index,
1265 uint64_t* page_frame_number,
1266 std::string* error_msg) {
1267 CHECK(page_map_file != nullptr);
1268 CHECK(page_frame_number != nullptr);
1269 CHECK(error_msg != nullptr);
1270
1271 constexpr size_t kPageMapEntrySize = sizeof(uint64_t);
1272 constexpr uint64_t kPageFrameNumberMask = (1ULL << 55) - 1; // bits 0-54 [in /proc/$pid/pagemap]
1273 constexpr uint64_t kPageSoftDirtyMask = (1ULL << 55); // bit 55 [in /proc/$pid/pagemap]
1274
1275 uint64_t page_map_entry = 0;
1276
1277 // Read 64-bit entry from /proc/$pid/pagemap to get the physical page frame number
1278 if (!page_map_file->PreadFully(&page_map_entry, kPageMapEntrySize,
1279 virtual_page_index * kPageMapEntrySize)) {
1280 *error_msg = StringPrintf("Failed to read the virtual page index entry from %s",
1281 page_map_file->GetPath().c_str());
1282 return false;
1283 }
1284
1285 // TODO: seems useless, remove this.
1286 bool soft_dirty = (page_map_entry & kPageSoftDirtyMask) != 0;
1287 if ((false)) {
1288 LOG(VERBOSE) << soft_dirty; // Suppress unused warning
1289 UNREACHABLE();
1290 }
1291
1292 *page_frame_number = page_map_entry & kPageFrameNumberMask;
1293
1294 return true;
1295 }
1296
1297 static int IsPageDirty(File* page_map_file,
David Sehr50005a02017-06-21 13:24:21 -07001298 File* clean_pagemap_file,
1299 File* kpageflags_file,
1300 File* kpagecount_file,
Igor Murashkin37743352014-11-13 14:38:00 -08001301 size_t virtual_page_idx,
1302 size_t clean_virtual_page_idx,
1303 // Out parameters:
1304 uint64_t* page_count, std::string* error_msg) {
1305 CHECK(page_map_file != nullptr);
David Sehr50005a02017-06-21 13:24:21 -07001306 CHECK(clean_pagemap_file != nullptr);
1307 CHECK_NE(page_map_file, clean_pagemap_file);
1308 CHECK(kpageflags_file != nullptr);
1309 CHECK(kpagecount_file != nullptr);
Igor Murashkin37743352014-11-13 14:38:00 -08001310 CHECK(page_count != nullptr);
1311 CHECK(error_msg != nullptr);
1312
1313 // Constants are from https://www.kernel.org/doc/Documentation/vm/pagemap.txt
1314
1315 constexpr size_t kPageFlagsEntrySize = sizeof(uint64_t);
1316 constexpr size_t kPageCountEntrySize = sizeof(uint64_t);
1317 constexpr uint64_t kPageFlagsDirtyMask = (1ULL << 4); // in /proc/kpageflags
1318 constexpr uint64_t kPageFlagsNoPageMask = (1ULL << 20); // in /proc/kpageflags
1319 constexpr uint64_t kPageFlagsMmapMask = (1ULL << 11); // in /proc/kpageflags
1320
1321 uint64_t page_frame_number = 0;
1322 if (!GetPageFrameNumber(page_map_file, virtual_page_idx, &page_frame_number, error_msg)) {
1323 return -1;
1324 }
1325
1326 uint64_t page_frame_number_clean = 0;
David Sehr50005a02017-06-21 13:24:21 -07001327 if (!GetPageFrameNumber(clean_pagemap_file, clean_virtual_page_idx, &page_frame_number_clean,
Igor Murashkin37743352014-11-13 14:38:00 -08001328 error_msg)) {
1329 return -1;
1330 }
1331
1332 // Read 64-bit entry from /proc/kpageflags to get the dirty bit for a page
1333 uint64_t kpage_flags_entry = 0;
David Sehr50005a02017-06-21 13:24:21 -07001334 if (!kpageflags_file->PreadFully(&kpage_flags_entry,
Igor Murashkin37743352014-11-13 14:38:00 -08001335 kPageFlagsEntrySize,
1336 page_frame_number * kPageFlagsEntrySize)) {
1337 *error_msg = StringPrintf("Failed to read the page flags from %s",
David Sehr50005a02017-06-21 13:24:21 -07001338 kpageflags_file->GetPath().c_str());
Igor Murashkin37743352014-11-13 14:38:00 -08001339 return -1;
1340 }
1341
1342 // Read 64-bit entyry from /proc/kpagecount to get mapping counts for a page
David Sehr50005a02017-06-21 13:24:21 -07001343 if (!kpagecount_file->PreadFully(page_count /*out*/,
Igor Murashkin37743352014-11-13 14:38:00 -08001344 kPageCountEntrySize,
1345 page_frame_number * kPageCountEntrySize)) {
1346 *error_msg = StringPrintf("Failed to read the page count from %s",
David Sehr50005a02017-06-21 13:24:21 -07001347 kpagecount_file->GetPath().c_str());
Igor Murashkin37743352014-11-13 14:38:00 -08001348 return -1;
1349 }
1350
1351 // There must be a page frame at the requested address.
1352 CHECK_EQ(kpage_flags_entry & kPageFlagsNoPageMask, 0u);
1353 // The page frame must be memory mapped
1354 CHECK_NE(kpage_flags_entry & kPageFlagsMmapMask, 0u);
1355
1356 // Page is dirty, i.e. has diverged from file, if the 4th bit is set to 1
1357 bool flags_dirty = (kpage_flags_entry & kPageFlagsDirtyMask) != 0;
1358
1359 // page_frame_number_clean must come from the *same* process
1360 // but a *different* mmap than page_frame_number
1361 if (flags_dirty) {
1362 CHECK_NE(page_frame_number, page_frame_number_clean);
1363 }
1364
1365 return page_frame_number != page_frame_number_clean;
1366 }
1367
David Sehr50005a02017-06-21 13:24:21 -07001368 void PrintPidLine(const std::string& kind, pid_t pid) {
1369 if (pid < 0) {
1370 *os_ << kind << " DIFF PID: disabled\n\n";
1371 } else {
1372 *os_ << kind << " DIFF PID (" << pid << "): ";
1373 }
1374 }
1375
1376 static bool EndsWith(const std::string& str, const std::string& suffix) {
1377 return str.size() >= suffix.size() &&
1378 str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
1379 }
1380
1381 // Return suffix of the file path after the last /. (e.g. /foo/bar -> bar, bar -> bar)
1382 static std::string BaseName(const std::string& str) {
1383 size_t idx = str.rfind('/');
1384 if (idx == std::string::npos) {
1385 return str;
1386 }
1387
1388 return str.substr(idx + 1);
1389 }
1390
Igor Murashkin37743352014-11-13 14:38:00 -08001391 // Return the image location, stripped of any directories, e.g. "boot.art" or "core.art"
1392 std::string GetImageLocationBaseName() const {
1393 return BaseName(std::string(image_location_));
1394 }
1395
1396 std::ostream* os_;
1397 const ImageHeader& image_header_;
Andreas Gampe8994a042015-12-30 19:03:17 +00001398 const std::string image_location_;
Igor Murashkin37743352014-11-13 14:38:00 -08001399 pid_t image_diff_pid_; // Dump image diff against boot.art if pid is non-negative
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001400 pid_t zygote_diff_pid_; // Dump image diff against zygote boot.art if pid is non-negative
Jeff Haoc23b0c02017-07-27 18:19:38 -07001401 bool dump_dirty_objects_; // Adds dumping of objects that are dirty.
David Sehr20e271a2017-06-14 13:02:14 -07001402 bool zygote_pid_only_; // The user only specified a pid for the zygote.
Igor Murashkin37743352014-11-13 14:38:00 -08001403
David Sehr50005a02017-06-21 13:24:21 -07001404 // BacktraceMap used for finding the memory mapping of the image file.
1405 std::unique_ptr<BacktraceMap> proc_maps_;
1406 // Boot image mapping.
1407 backtrace_map_t boot_map_{}; // NOLINT
1408 // The size of the boot image mapping.
1409 size_t boot_map_size_;
1410 // The contents of /proc/<image_diff_pid_>/maps.
1411 std::vector<uint8_t> remote_contents_;
1412 // The contents of /proc/<zygote_diff_pid_>/maps.
1413 std::vector<uint8_t> zygote_contents_;
1414 // A File for reading /proc/<zygote_diff_pid_>/maps.
1415 File pagemap_file_;
1416 // A File for reading /proc/self/pagemap.
1417 File clean_pagemap_file_;
1418 // A File for reading /proc/kpageflags.
1419 File kpageflags_file_;
1420 // A File for reading /proc/kpagecount.
1421 File kpagecount_file_;
1422
Igor Murashkin37743352014-11-13 14:38:00 -08001423 DISALLOW_COPY_AND_ASSIGN(ImgDiagDumper);
1424};
1425
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001426static int DumpImage(Runtime* runtime,
1427 std::ostream* os,
1428 pid_t image_diff_pid,
Jeff Haoc23b0c02017-07-27 18:19:38 -07001429 pid_t zygote_diff_pid,
1430 bool dump_dirty_objects) {
Igor Murashkin37743352014-11-13 14:38:00 -08001431 ScopedObjectAccess soa(Thread::Current());
1432 gc::Heap* heap = runtime->GetHeap();
Jeff Haodcdc85b2015-12-04 14:06:18 -08001433 std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
1434 CHECK(!image_spaces.empty());
1435 for (gc::space::ImageSpace* image_space : image_spaces) {
1436 const ImageHeader& image_header = image_space->GetImageHeader();
1437 if (!image_header.IsValid()) {
1438 fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
1439 return EXIT_FAILURE;
1440 }
1441
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001442 ImgDiagDumper img_diag_dumper(os,
1443 image_header,
1444 image_space->GetImageLocation(),
1445 image_diff_pid,
Jeff Haoc23b0c02017-07-27 18:19:38 -07001446 zygote_diff_pid,
1447 dump_dirty_objects);
David Sehr50005a02017-06-21 13:24:21 -07001448 if (!img_diag_dumper.Init()) {
1449 return EXIT_FAILURE;
1450 }
Jeff Haodcdc85b2015-12-04 14:06:18 -08001451 if (!img_diag_dumper.Dump()) {
1452 return EXIT_FAILURE;
1453 }
Igor Murashkin37743352014-11-13 14:38:00 -08001454 }
Jeff Haodcdc85b2015-12-04 14:06:18 -08001455 return EXIT_SUCCESS;
Igor Murashkin37743352014-11-13 14:38:00 -08001456}
1457
1458struct ImgDiagArgs : public CmdlineArgs {
1459 protected:
1460 using Base = CmdlineArgs;
1461
1462 virtual ParseStatus ParseCustom(const StringPiece& option,
1463 std::string* error_msg) OVERRIDE {
1464 {
1465 ParseStatus base_parse = Base::ParseCustom(option, error_msg);
1466 if (base_parse != kParseUnknownArgument) {
1467 return base_parse;
1468 }
1469 }
1470
1471 if (option.starts_with("--image-diff-pid=")) {
1472 const char* image_diff_pid = option.substr(strlen("--image-diff-pid=")).data();
1473
1474 if (!ParseInt(image_diff_pid, &image_diff_pid_)) {
1475 *error_msg = "Image diff pid out of range";
1476 return kParseError;
1477 }
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001478 } else if (option.starts_with("--zygote-diff-pid=")) {
1479 const char* zygote_diff_pid = option.substr(strlen("--zygote-diff-pid=")).data();
1480
1481 if (!ParseInt(zygote_diff_pid, &zygote_diff_pid_)) {
1482 *error_msg = "Zygote diff pid out of range";
1483 return kParseError;
1484 }
Jeff Haoc23b0c02017-07-27 18:19:38 -07001485 } else if (option == "--dump-dirty-objects") {
1486 dump_dirty_objects_ = true;
Igor Murashkin37743352014-11-13 14:38:00 -08001487 } else {
1488 return kParseUnknownArgument;
1489 }
1490
1491 return kParseOk;
1492 }
1493
1494 virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
1495 // Perform the parent checks.
1496 ParseStatus parent_checks = Base::ParseChecks(error_msg);
1497 if (parent_checks != kParseOk) {
1498 return parent_checks;
1499 }
1500
1501 // Perform our own checks.
1502
1503 if (kill(image_diff_pid_,
1504 /*sig*/0) != 0) { // No signal is sent, perform error-checking only.
1505 // Check if the pid exists before proceeding.
1506 if (errno == ESRCH) {
1507 *error_msg = "Process specified does not exist";
1508 } else {
1509 *error_msg = StringPrintf("Failed to check process status: %s", strerror(errno));
1510 }
1511 return kParseError;
1512 } else if (instruction_set_ != kRuntimeISA) {
1513 // Don't allow different ISAs since the images are ISA-specific.
1514 // Right now the code assumes both the runtime ISA and the remote ISA are identical.
1515 *error_msg = "Must use the default runtime ISA; changing ISA is not supported.";
1516 return kParseError;
1517 }
1518
1519 return kParseOk;
1520 }
1521
1522 virtual std::string GetUsage() const {
1523 std::string usage;
1524
1525 usage +=
1526 "Usage: imgdiag [options] ...\n"
1527 " Example: imgdiag --image-diff-pid=$(pidof dex2oat)\n"
1528 " Example: adb shell imgdiag --image-diff-pid=$(pid zygote)\n"
1529 "\n";
1530
1531 usage += Base::GetUsage();
1532
1533 usage += // Optional.
1534 " --image-diff-pid=<pid>: provide the PID of a process whose boot.art you want to diff.\n"
1535 " Example: --image-diff-pid=$(pid zygote)\n"
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001536 " --zygote-diff-pid=<pid>: provide the PID of the zygote whose boot.art you want to diff "
1537 "against.\n"
1538 " Example: --zygote-diff-pid=$(pid zygote)\n"
Jeff Haoc23b0c02017-07-27 18:19:38 -07001539 " --dump-dirty-objects: additionally output dirty objects of interest.\n"
Igor Murashkin37743352014-11-13 14:38:00 -08001540 "\n";
1541
1542 return usage;
1543 }
1544
1545 public:
1546 pid_t image_diff_pid_ = -1;
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001547 pid_t zygote_diff_pid_ = -1;
Jeff Haoc23b0c02017-07-27 18:19:38 -07001548 bool dump_dirty_objects_ = false;
Igor Murashkin37743352014-11-13 14:38:00 -08001549};
1550
1551struct ImgDiagMain : public CmdlineMain<ImgDiagArgs> {
1552 virtual bool ExecuteWithRuntime(Runtime* runtime) {
1553 CHECK(args_ != nullptr);
1554
1555 return DumpImage(runtime,
Igor Murashkin37743352014-11-13 14:38:00 -08001556 args_->os_,
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001557 args_->image_diff_pid_,
Jeff Haoc23b0c02017-07-27 18:19:38 -07001558 args_->zygote_diff_pid_,
1559 args_->dump_dirty_objects_) == EXIT_SUCCESS;
Igor Murashkin37743352014-11-13 14:38:00 -08001560 }
1561};
1562
1563} // namespace art
1564
1565int main(int argc, char** argv) {
1566 art::ImgDiagMain main;
1567 return main.Main(argc, argv);
1568}