/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#include <stdio.h>
#include <stdlib.h>

#include <algorithm>
#include <fstream>
#include <functional>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "android-base/stringprintf.h"

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "image.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"

#include "backtrace/BacktraceMap.h"
#include "cmdline.h"

#include <signal.h>
#include <sys/stat.h>
#include <sys/types.h>
Igor Murashkin37743352014-11-13 14:38:00 -080048
49namespace art {
50
Andreas Gampe46ee31b2016-12-14 10:11:49 -080051using android::base::StringPrintf;
52
David Sehrb4005f02017-06-20 19:11:40 -070053namespace {
54
// Maximum number of sample dirty-entry addresses printed per report.
constexpr size_t kMaxAddressPrint = 5;

// Which foreign process a diffed entry came from.
enum class ProcessType {
  kZygote,
  kRemote
};

// Which combination of remote processes was supplied for the diff.
enum class RemoteProcesses {
  kImageOnly,
  kZygoteOnly,
  kImageAndZygote
};
67
// Aggregate page-level statistics for one memory mapping, collected by the
// page-level diff pass before any per-entry analysis runs.
struct MappingData {
  // The count of pages that are considered dirty by the OS.
  size_t dirty_pages = 0;
  // The count of pages that differ by at least one byte.
  size_t different_pages = 0;
  // The count of differing bytes.
  size_t different_bytes = 0;
  // The count of differing four-byte units.
  size_t different_int32s = 0;
  // The count of pages that have mapping count == 1.
  size_t private_pages = 0;
  // The count of private pages that are also dirty.
  size_t private_dirty_pages = 0;
  // The count of pages that are marked dirty but do not differ.
  size_t false_dirty_pages = 0;
  // Set of the local virtual page indices that are dirty.
  std::set<size_t> dirty_page_set;
};
86
87static std::string GetClassDescriptor(mirror::Class* klass)
88 REQUIRES_SHARED(Locks::mutator_lock_) {
89 CHECK(klass != nullptr);
90
91 std::string descriptor;
92 const char* descriptor_str = klass->GetDescriptor(&descriptor /*out*/);
93
94 return std::string(descriptor_str);
95}
96
// Render the value of |field| within |object| as a printable string.
// Reads are done with kVerifyNone (and without read barriers for references)
// because |object| may be a raw copy of a foreign process' memory.
static std::string PrettyFieldValue(ArtField* field, mirror::Object* object)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  std::ostringstream oss;
  switch (field->GetTypeAsPrimitiveType()) {
    case Primitive::kPrimNot: {
      // Reference field: print the referent's address.
      oss << object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
          field->GetOffset());
      break;
    }
    case Primitive::kPrimBoolean: {
      oss << static_cast<bool>(object->GetFieldBoolean<kVerifyNone>(field->GetOffset()));
      break;
    }
    case Primitive::kPrimByte: {
      // Widen to int32_t so the byte prints as a number, not a character.
      oss << static_cast<int32_t>(object->GetFieldByte<kVerifyNone>(field->GetOffset()));
      break;
    }
    case Primitive::kPrimChar: {
      oss << object->GetFieldChar<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimShort: {
      oss << object->GetFieldShort<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimInt: {
      oss << object->GetField32<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimLong: {
      oss << object->GetField64<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimFloat: {
      // NOTE(review): prints the raw 32-bit pattern, not the float value —
      // presumably deliberate so diffs are bit-exact; confirm before changing.
      oss << object->GetField32<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimDouble: {
      // NOTE(review): raw 64-bit pattern, same rationale as kPrimFloat.
      oss << object->GetField64<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimVoid: {
      oss << "void";
      break;
    }
  }
  return oss.str();
}
145
// Sort a map's entries in descending order of a value derived from each
// mapped element, returning (value, key) pairs.
//
// K: key type of the input map.
// V: comparable value type used for ordering (derived from D).
// D: mapped type of the input map.
// |value_mapper| converts each mapped element to its sortable value; by
// default it is a static_cast<V>.
template <typename K, typename V, typename D>
static std::vector<std::pair<V, K>> SortByValueDesc(
    // Pass by const reference: the previous by-value parameter copied the
    // entire (potentially large) map on every call.
    const std::map<K, D>& map,
    std::function<V(const D&)> value_mapper = [](const D& d) { return static_cast<V>(d); }) {
  // Store value->key so that we can use the default sort from pair which
  // sorts by value first and then key.
  std::vector<std::pair<V, K>> value_key_vector;
  value_key_vector.reserve(map.size());  // One allocation instead of log(n) regrowths.
  for (const auto& kv_pair : map) {
    value_key_vector.push_back(std::make_pair(value_mapper(kv_pair.second), kv_pair.first));
  }
  // Sort in reverse (descending order).
  std::sort(value_key_vector.rbegin(), value_key_vector.rend());
  return value_key_vector;
}
162
163// Fixup a remote pointer that we read from a foreign boot.art to point to our own memory.
164// Returned pointer will point to inside of remote_contents.
165template <typename T>
166static T* FixUpRemotePointer(T* remote_ptr,
167 std::vector<uint8_t>& remote_contents,
168 const backtrace_map_t& boot_map) {
169 if (remote_ptr == nullptr) {
170 return nullptr;
171 }
172
173 uintptr_t remote = reinterpret_cast<uintptr_t>(remote_ptr);
174
175 CHECK_LE(boot_map.start, remote);
176 CHECK_GT(boot_map.end, remote);
177
178 off_t boot_offset = remote - boot_map.start;
179
180 return reinterpret_cast<T*>(&remote_contents[boot_offset]);
181}
182
183template <typename T>
184static T* RemoteContentsPointerToLocal(T* remote_ptr,
185 std::vector<uint8_t>& remote_contents,
186 const ImageHeader& image_header) {
187 if (remote_ptr == nullptr) {
188 return nullptr;
189 }
190
191 uint8_t* remote = reinterpret_cast<uint8_t*>(remote_ptr);
192 ptrdiff_t boot_offset = remote - &remote_contents[0];
193
194 const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header) + boot_offset;
195
196 return reinterpret_cast<T*>(const_cast<uint8_t*>(local_ptr));
197}
198
// Size in bytes of one image entry of type T.
template <typename T> size_t EntrySize(T* entry);
// mirror::Objects are variable-sized; ask the object itself.
template<> size_t EntrySize(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
  return object->SizeOf();
}
// ArtMethods are fixed-size native structs.
template<> size_t EntrySize(ArtMethod* art_method) REQUIRES_SHARED(Locks::mutator_lock_) {
  return sizeof(*art_method);
}
206
// Returns true if the two entries differ in any byte, comparing over the
// size of the first (local) entry.
template <typename T>
static bool EntriesDiffer(T* entry1, T* entry2) REQUIRES_SHARED(Locks::mutator_lock_) {
  return memcmp(entry1, entry2, EntrySize(entry1)) != 0;
}
211
// State and helpers shared by all per-entry-type region analyses.
// T is the entry type walked through the image region (mirror::Object or
// ArtMethod). Accumulates dirty / false-dirty / zygote-dirty entry sets,
// aggregate byte counts, and a per-byte-offset dirty histogram.
template <typename T>
struct RegionCommon {
 public:
  RegionCommon(std::ostream* os,
               std::vector<uint8_t>* remote_contents,
               std::vector<uint8_t>* zygote_contents,
               const backtrace_map_t& boot_map,
               const ImageHeader& image_header) :
    os_(*os),
    remote_contents_(remote_contents),
    zygote_contents_(zygote_contents),
    boot_map_(boot_map),
    image_header_(image_header),
    different_entries_(0),
    dirty_entry_bytes_(0),
    false_dirty_entry_bytes_(0) {
    CHECK(remote_contents != nullptr);
    CHECK(zygote_contents != nullptr);
  }

  // Print up to kMaxAddressPrint sample dirty-entry addresses, followed by
  // the per-byte-offset dirty counts sorted by descending count.
  void DumpSamplesAndOffsetCount() {
    os_ << "      sample object addresses: ";
    for (size_t i = 0; i < dirty_entries_.size() && i < kMaxAddressPrint; ++i) {
      T* entry = dirty_entries_[i];
      os_ << reinterpret_cast<void*>(entry) << ", ";
    }
    os_ << "\n";
    os_ << "      dirty byte +offset:count list = ";
    std::vector<std::pair<size_t, off_t>> field_dirty_count_sorted =
        SortByValueDesc<off_t, size_t, size_t>(field_dirty_count_);
    for (const std::pair<size_t, off_t>& pair : field_dirty_count_sorted) {
      off_t offset = pair.second;
      size_t count = pair.first;
      os_ << "+" << offset << ":" << count << ", ";
    }
    os_ << "\n";
  }

  size_t GetDifferentEntryCount() const { return different_entries_; }
  size_t GetDirtyEntryBytes() const { return dirty_entry_bytes_; }
  size_t GetFalseDirtyEntryCount() const { return false_dirty_entries_.size(); }
  size_t GetFalseDirtyEntryBytes() const { return false_dirty_entry_bytes_; }
  size_t GetZygoteDirtyEntryCount() const { return zygote_dirty_entries_.size(); }

 protected:
  // Returns true if any page spanned by |entry| appears in |dirty_pages|,
  // a set of virtual page indices (address / kPageSize).
  bool IsEntryOnDirtyPage(T* entry, const std::set<size_t>& dirty_pages) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t size = EntrySize(entry);
    size_t page_off = 0;
    size_t current_page_idx;
    uintptr_t entry_address = reinterpret_cast<uintptr_t>(entry);
    // Iterate every page this entry belongs to
    do {
      current_page_idx = entry_address / kPageSize + page_off;
      if (dirty_pages.find(current_page_idx) != dirty_pages.end()) {
        // This entry is on a dirty page
        return true;
      }
      page_off++;
    } while ((current_page_idx * kPageSize) < RoundUp(entry_address + size, kObjectAlignment));
    return false;
  }

  void AddZygoteDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    zygote_dirty_entries_.insert(entry);
  }

  void AddImageDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    image_dirty_entries_.insert(entry);
  }

  // Record an entry that is clean but lives on an OS-dirty page.
  void AddFalseDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    false_dirty_entries_.push_back(entry);
    false_dirty_entry_bytes_ += EntrySize(entry);
  }

  // The output stream to write to.
  std::ostream& os_;
  // The byte contents of the remote (image) process' image.
  std::vector<uint8_t>* remote_contents_;
  // The byte contents of the zygote process' image.
  std::vector<uint8_t>* zygote_contents_;
  // Mapping of the boot image inside the remote process.
  const backtrace_map_t& boot_map_;
  // Locally mapped (clean) image header.
  const ImageHeader& image_header_;

  // Count of entries that are different.
  size_t different_entries_;

  // Local entries that are dirty (differ in at least one byte).
  size_t dirty_entry_bytes_;
  std::vector<T*> dirty_entries_;

  // Local entries that are clean, but located on dirty pages.
  size_t false_dirty_entry_bytes_;
  std::vector<T*> false_dirty_entries_;

  // Image dirty entries
  // If zygote_pid_only_ == true, these are shared dirty entries in the zygote.
  // If zygote_pid_only_ == false, these are private dirty entries in the application.
  std::set<T*> image_dirty_entries_;

  // Zygote dirty entries: entries whose bytes differ between the zygote's
  // mapping and the clean local image, i.e. the zygote itself dirtied them.
  // NOTE(review): the report labels these "probably shared dirty" — confirm
  // intended classification against ComputeEntryDirty.
  std::set<T*> zygote_dirty_entries_;

  // Histogram: for each byte offset within an entry, how many dirty entries
  // differ at that offset.
  std::map<off_t /* field offset */, size_t /* count */> field_dirty_count_;

 private:
  DISALLOW_COPY_AND_ASSIGN(RegionCommon);
};
323
// Empty primary template; only the mirror::Object and ArtMethod
// specializations below are instantiated.
template <typename T>
class RegionSpecializedBase : public RegionCommon<T> {
};
327
// Region analysis for mirror::Objects
template<>
class RegionSpecializedBase<mirror::Object> : public RegionCommon<mirror::Object> {
 public:
  RegionSpecializedBase(std::ostream* os,
                        std::vector<uint8_t>* remote_contents,
                        std::vector<uint8_t>* zygote_contents,
                        const backtrace_map_t& boot_map,
                        const ImageHeader& image_header) :
    RegionCommon<mirror::Object>(os, remote_contents, zygote_contents, boot_map, image_header),
    os_(*os) { }

  // Verify that |current| points at a plausible mirror::Object: aligned,
  // non-null class, and (with Baker read barriers) a sane read-barrier state.
  void CheckEntrySanity(const uint8_t* current) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK_ALIGNED(current, kObjectAlignment);
    mirror::Object* entry = reinterpret_cast<mirror::Object*>(const_cast<uint8_t*>(current));
    // Sanity check that we are reading a real mirror::Object
    CHECK(entry->GetClass() != nullptr) << "Image object at address "
                                        << entry
                                        << " has null class";
    if (kUseBakerReadBarrier) {
      entry->AssertReadBarrierState();
    }
  }

  // Advance past |entry| to the next object slot (objects are laid out
  // back-to-back, rounded up to kObjectAlignment).
  mirror::Object* GetNextEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    uint8_t* next =
        reinterpret_cast<uint8_t*>(entry) + RoundUp(EntrySize(entry), kObjectAlignment);
    return reinterpret_cast<mirror::Object*>(next);
  }

  void VisitEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Unconditionally store the class descriptor in case we need it later
    mirror::Class* klass = entry->GetClass();
    class_data_[klass].descriptor = GetClassDescriptor(klass);
  }

  void AddCleanEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    class_data_[entry->GetClass()].AddCleanObject();
  }

  // Record a clean object living on a dirty page, both globally and per-class.
  void AddFalseDirtyEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    RegionCommon<mirror::Object>::AddFalseDirtyEntry(entry);
    class_data_[entry->GetClass()].AddFalseDirtyObject(entry);
  }

  // Record a dirty object. For java.lang.Class objects the per-byte-offset
  // histogram and the sample list are also updated.
  void AddDirtyEntry(mirror::Object* entry, mirror::Object* entry_remote)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t entry_size = EntrySize(entry);
    ++different_entries_;
    dirty_entry_bytes_ += entry_size;
    // Log dirty count and objects for class objects only.
    mirror::Class* klass = entry->GetClass();
    if (klass->IsClassClass()) {
      // Increment counts for the fields that are dirty
      const uint8_t* current = reinterpret_cast<const uint8_t*>(entry);
      const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(entry_remote);
      for (size_t i = 0; i < entry_size; ++i) {
        if (current[i] != current_remote[i]) {
          field_dirty_count_[i]++;
        }
      }
      dirty_entries_.push_back(entry);
    }
    class_data_[klass].AddDirtyObject(entry, entry_remote);
  }

  // Print a byte-level diff of one object: which instance/static fields (or
  // array elements) differ between |base_ptr| and |remote_bytes|, then the
  // pretty-printed values of each dirty field.
  void DiffEntryContents(mirror::Object* entry, uint8_t* remote_bytes, const uint8_t* base_ptr)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* tabs = "    ";
    // Attempt to find fields for all dirty bytes.
    mirror::Class* klass = entry->GetClass();
    if (entry->IsClass()) {
      os_ << tabs
          << "Class " << mirror::Class::PrettyClass(entry->AsClass()) << " " << entry << "\n";
    } else {
      os_ << tabs
          << "Instance of " << mirror::Class::PrettyClass(klass) << " " << entry << "\n";
    }

    std::unordered_set<ArtField*> dirty_instance_fields;
    std::unordered_set<ArtField*> dirty_static_fields;
    // Examine the bytes comprising the Object, computing which fields are dirty
    // and recording them for later display. If the Object is an array object,
    // compute the dirty entries.
    mirror::Object* remote_entry = reinterpret_cast<mirror::Object*>(remote_bytes);
    for (size_t i = 0, count = entry->SizeOf(); i < count; ++i) {
      if (base_ptr[i] != remote_bytes[i]) {
        // Non-exact lookup: the field containing byte offset i.
        ArtField* field = ArtField::FindInstanceFieldWithOffset</*exact*/false>(klass, i);
        if (field != nullptr) {
          dirty_instance_fields.insert(field);
        } else if (entry->IsClass()) {
          field = ArtField::FindStaticFieldWithOffset</*exact*/false>(entry->AsClass(), i);
          if (field != nullptr) {
            dirty_static_fields.insert(field);
          }
        }
        if (field == nullptr) {
          if (klass->IsArrayClass()) {
            mirror::Class* component_type = klass->GetComponentType();
            Primitive::Type primitive_type = component_type->GetPrimitiveType();
            size_t component_size = Primitive::ComponentSize(primitive_type);
            size_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
            if (i >= data_offset) {
              os_ << tabs << "Dirty array element " << (i - data_offset) / component_size << "\n";
              // Skip to next element to prevent spam.
              i += component_size - 1;
              continue;
            }
          }
          os_ << tabs << "No field for byte offset " << i << "\n";
        }
      }
    }
    // Dump different fields.
    if (!dirty_instance_fields.empty()) {
      os_ << tabs << "Dirty instance fields " << dirty_instance_fields.size() << "\n";
      for (ArtField* field : dirty_instance_fields) {
        os_ << tabs << ArtField::PrettyField(field)
            << " original=" << PrettyFieldValue(field, entry)
            << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
      }
    }
    if (!dirty_static_fields.empty()) {
      os_ << tabs << "Dirty static fields " << dirty_static_fields.size() << "\n";
      for (ArtField* field : dirty_static_fields) {
        os_ << tabs << ArtField::PrettyField(field)
            << " original=" << PrettyFieldValue(field, entry)
            << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
      }
    }
    os_ << "\n";
  }

  // Report dirty objects grouped by class, sorted by descending dirty-object
  // count; for java.lang.Class also dump samples and class statuses.
  void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto dirty_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.dirty_object_count; });
    os_ << "\n" << "  Dirty object count by class:\n";
    for (const auto& vk_pair : dirty_object_class_values) {
      size_t dirty_object_count = vk_pair.first;
      mirror::Class* klass = vk_pair.second;
      ClassData& class_data = class_data_[klass];
      size_t object_sizes = class_data.dirty_object_size_in_bytes;
      float avg_dirty_bytes_per_class =
          class_data.dirty_object_byte_count * 1.0f / object_sizes;
      float avg_object_size = object_sizes * 1.0f / dirty_object_count;
      const std::string& descriptor = class_data.descriptor;
      os_ << "    " << mirror::Class::PrettyClass(klass) << " ("
          << "objects: " << dirty_object_count << ", "
          << "avg dirty bytes: " << avg_dirty_bytes_per_class << ", "
          << "avg object size: " << avg_object_size << ", "
          << "class descriptor: '" << descriptor << "'"
          << ")\n";
      if (strcmp(descriptor.c_str(), "Ljava/lang/Class;") == 0) {
        DumpSamplesAndOffsetCount();
        os_ << "      field contents:\n";
        for (mirror::Object* object : class_data.dirty_objects) {
          // remote class object
          auto remote_klass = reinterpret_cast<mirror::Class*>(object);
          // local class object
          auto local_klass =
              RemoteContentsPointerToLocal(remote_klass,
                                           *RegionCommon<mirror::Object>::remote_contents_,
                                           RegionCommon<mirror::Object>::image_header_);
          os_ << "        " << reinterpret_cast<const void*>(object) << " ";
          os_ << "  class_status (remote): " << remote_klass->GetStatus() << ", ";
          os_ << "  class_status (local): " << local_klass->GetStatus();
          os_ << "\n";
        }
      }
    }
  }

  // Report false-dirty objects grouped by class, by descending count.
  void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto false_dirty_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.false_dirty_object_count; });
    os_ << "\n" << "  False-dirty object count by class:\n";
    for (const auto& vk_pair : false_dirty_object_class_values) {
      size_t object_count = vk_pair.first;
      mirror::Class* klass = vk_pair.second;
      ClassData& class_data = class_data_[klass];
      size_t object_sizes = class_data.false_dirty_byte_count;
      float avg_object_size = object_sizes * 1.0f / object_count;
      const std::string& descriptor = class_data.descriptor;
      os_ << "    " << mirror::Class::PrettyClass(klass) << " ("
          << "objects: " << object_count << ", "
          << "avg object size: " << avg_object_size << ", "
          << "total bytes: " << object_sizes << ", "
          << "class descriptor: '" << descriptor << "'"
          << ")\n";
    }
  }

  // Report clean object counts grouped by class, by descending count.
  void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto clean_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.clean_object_count; });
    os_ << "\n" << "  Clean object count by class:\n";
    for (const auto& vk_pair : clean_object_class_values) {
      os_ << "    " << mirror::Class::PrettyClass(vk_pair.second) << " (" << vk_pair.first << ")\n";
    }
  }

 private:
  // Aggregate and detail class data from an image diff.
  struct ClassData {
    size_t dirty_object_count = 0;
    // Track only the byte-per-byte dirtiness (in bytes)
    size_t dirty_object_byte_count = 0;
    // Track the object-by-object dirtiness (in bytes)
    size_t dirty_object_size_in_bytes = 0;
    size_t clean_object_count = 0;
    // JNI-style descriptor of the class, e.g. "Ljava/lang/Class;".
    std::string descriptor;
    size_t false_dirty_byte_count = 0;
    size_t false_dirty_object_count = 0;
    std::vector<mirror::Object*> false_dirty_objects;
    // Remote pointers to dirty objects
    std::vector<mirror::Object*> dirty_objects;

    void AddCleanObject() REQUIRES_SHARED(Locks::mutator_lock_) {
      ++clean_object_count;
    }

    void AddDirtyObject(mirror::Object* object, mirror::Object* object_remote)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      ++dirty_object_count;
      dirty_object_byte_count += CountDirtyBytes(object, object_remote);
      dirty_object_size_in_bytes += EntrySize(object);
      dirty_objects.push_back(object_remote);
    }

    void AddFalseDirtyObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
      ++false_dirty_object_count;
      false_dirty_objects.push_back(object);
      false_dirty_byte_count += EntrySize(object);
    }

   private:
    // Go byte-by-byte and figure out what exactly got dirtied
    static size_t CountDirtyBytes(mirror::Object* object1, mirror::Object* object2)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      const uint8_t* cur1 = reinterpret_cast<const uint8_t*>(object1);
      const uint8_t* cur2 = reinterpret_cast<const uint8_t*>(object2);
      size_t dirty_bytes = 0;
      size_t object_size = EntrySize(object1);
      for (size_t i = 0; i < object_size; ++i) {
        if (cur1[i] != cur2[i]) {
          dirty_bytes++;
        }
      }
      return dirty_bytes;
    }
  };

  std::ostream& os_;
  std::map<mirror::Class*, ClassData> class_data_;

  DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
};
599
600// Region analysis for ArtMethods.
601// TODO: most of these need work.
602template<>
603class RegionSpecializedBase<ArtMethod> : RegionCommon<ArtMethod> {
604 public:
605 RegionSpecializedBase(std::ostream* os,
606 std::vector<uint8_t>* remote_contents,
607 std::vector<uint8_t>* zygote_contents,
608 const backtrace_map_t& boot_map,
609 const ImageHeader& image_header) :
610 RegionCommon<ArtMethod>(os, remote_contents, zygote_contents, boot_map, image_header),
611 os_(*os) { }
612
613 void CheckEntrySanity(const uint8_t* current ATTRIBUTE_UNUSED) const
614 REQUIRES_SHARED(Locks::mutator_lock_) {
615 }
616
617 ArtMethod* GetNextEntry(ArtMethod* entry)
618 REQUIRES_SHARED(Locks::mutator_lock_) {
619 uint8_t* next = reinterpret_cast<uint8_t*>(entry) + RoundUp(EntrySize(entry), kObjectAlignment);
620 return reinterpret_cast<ArtMethod*>(next);
621 }
622
623 void VisitEntry(ArtMethod* method ATTRIBUTE_UNUSED)
624 REQUIRES_SHARED(Locks::mutator_lock_) {
625 }
626
627 void AddFalseDirtyEntry(ArtMethod* method)
628 REQUIRES_SHARED(Locks::mutator_lock_) {
629 RegionCommon<ArtMethod>::AddFalseDirtyEntry(method);
630 }
631
632 void AddCleanEntry(ArtMethod* method ATTRIBUTE_UNUSED) {
633 }
634
635 void AddDirtyEntry(ArtMethod* method, ArtMethod* method_remote)
636 REQUIRES_SHARED(Locks::mutator_lock_) {
637 size_t entry_size = EntrySize(method);
638 ++different_entries_;
639 dirty_entry_bytes_ += entry_size;
640 // Increment counts for the fields that are dirty
641 const uint8_t* current = reinterpret_cast<const uint8_t*>(method);
642 const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(method_remote);
643 // ArtMethods always log their dirty count and entries.
644 for (size_t i = 0; i < entry_size; ++i) {
645 if (current[i] != current_remote[i]) {
646 field_dirty_count_[i]++;
647 }
648 }
649 dirty_entries_.push_back(method);
650 }
651
652 void DiffEntryContents(ArtMethod* method ATTRIBUTE_UNUSED,
Mathieu Chartier51e79652017-07-24 15:43:38 -0700653 uint8_t* remote_bytes ATTRIBUTE_UNUSED,
654 const uint8_t* base_ptr ATTRIBUTE_UNUSED)
David Sehrb4005f02017-06-20 19:11:40 -0700655 REQUIRES_SHARED(Locks::mutator_lock_) {
656 }
657
658 void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
659 DumpSamplesAndOffsetCount();
660 os_ << " field contents:\n";
661 for (ArtMethod* method : dirty_entries_) {
662 // remote method
663 auto art_method = reinterpret_cast<ArtMethod*>(method);
664 // remote class
665 mirror::Class* remote_declaring_class =
666 FixUpRemotePointer(art_method->GetDeclaringClass(),
667 *RegionCommon<ArtMethod>::remote_contents_,
668 RegionCommon<ArtMethod>::boot_map_);
669 // local class
670 mirror::Class* declaring_class =
671 RemoteContentsPointerToLocal(remote_declaring_class,
672 *RegionCommon<ArtMethod>::remote_contents_,
673 RegionCommon<ArtMethod>::image_header_);
674 DumpOneArtMethod(art_method, declaring_class, remote_declaring_class);
675 }
676 }
677
678 void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
679 os_ << " field contents:\n";
680 for (ArtMethod* method : false_dirty_entries_) {
681 // local class
682 mirror::Class* declaring_class = method->GetDeclaringClass();
683 DumpOneArtMethod(method, declaring_class, nullptr);
684 }
685 }
686
687 void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
688 }
689
690 private:
691 std::ostream& os_;
692
693 void DumpOneArtMethod(ArtMethod* art_method,
694 mirror::Class* declaring_class,
695 mirror::Class* remote_declaring_class)
696 REQUIRES_SHARED(Locks::mutator_lock_) {
697 PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
698 os_ << " " << reinterpret_cast<const void*>(art_method) << " ";
699 os_ << " entryPointFromJni: "
700 << reinterpret_cast<const void*>(art_method->GetDataPtrSize(pointer_size)) << ", ";
701 os_ << " entryPointFromQuickCompiledCode: "
702 << reinterpret_cast<const void*>(
703 art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size))
704 << ", ";
705 os_ << " isNative? " << (art_method->IsNative() ? "yes" : "no") << ", ";
706 os_ << " class_status (local): " << declaring_class->GetStatus();
707 if (remote_declaring_class != nullptr) {
708 os_ << ", class_status (remote): " << remote_declaring_class->GetStatus();
709 }
710 os_ << "\n";
711 }
712
713 DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
714};
715
716template <typename T>
717class RegionData : public RegionSpecializedBase<T> {
718 public:
  // Construct the region walker; all pointers/references must outlive this
  // object. Both content vectors must be non-null (they may be empty).
  RegionData(std::ostream* os,
             std::vector<uint8_t>* remote_contents,
             std::vector<uint8_t>* zygote_contents,
             const backtrace_map_t& boot_map,
             const ImageHeader& image_header) :
    RegionSpecializedBase<T>(os, remote_contents, zygote_contents, boot_map, image_header),
    os_(*os) {
    CHECK(remote_contents != nullptr);
    CHECK(zygote_contents != nullptr);
  }
729
  // Walk over the type T entries in the region between begin_image_ptr and end_image_ptr,
  // collecting and reporting data regarding dirty, difference, etc.
  void ProcessRegion(const MappingData& mapping_data,
                     RemoteProcesses remotes,
                     const uint8_t* begin_image_ptr,
                     const uint8_t* end_image_ptr)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Entries start immediately after the image header, object-aligned.
    const uint8_t* current = begin_image_ptr + RoundUp(sizeof(ImageHeader), kObjectAlignment);
    T* entry = reinterpret_cast<T*>(const_cast<uint8_t*>(current));
    while (reinterpret_cast<uintptr_t>(entry) < reinterpret_cast<uintptr_t>(end_image_ptr)) {
      ComputeEntryDirty(entry, begin_image_ptr, mapping_data.dirty_page_set);

      entry = RegionSpecializedBase<T>::GetNextEntry(entry);
    }

    // Looking at only dirty pages, figure out how many of those bytes belong to dirty entries.
    // TODO: fix this now that there are multiple regions in a mapping.
    float true_dirtied_percent =
        RegionCommon<T>::GetDirtyEntryBytes() * 1.0f / (mapping_data.dirty_pages * kPageSize);

    // Entry specific statistics.
    os_ << RegionCommon<T>::GetDifferentEntryCount() << " different entries, \n  "
        << RegionCommon<T>::GetDirtyEntryBytes() << " different entry [bytes], \n  "
        << RegionCommon<T>::GetFalseDirtyEntryCount() << " false dirty entries,\n  "
        << RegionCommon<T>::GetFalseDirtyEntryBytes() << " false dirty entry [bytes], \n  "
        << true_dirtied_percent << " different entries-vs-total in a dirty page;\n  "
        << "\n";

    // Choose the local baseline to diff the remote contents against.
    const uint8_t* base_ptr = begin_image_ptr;
    switch (remotes) {
      case RemoteProcesses::kZygoteOnly:
        os_ << "  Zygote shared dirty entries: ";
        break;
      case RemoteProcesses::kImageAndZygote:
        os_ << "  Application dirty entries (private dirty): ";
        // If we are dumping private dirty, diff against the zygote map to make it clearer what
        // fields caused the page to be private dirty.
        base_ptr = &RegionCommon<T>::zygote_contents_->operator[](0);
        break;
      case RemoteProcesses::kImageOnly:
        os_ << "  Application dirty entries (unknown whether private or shared dirty): ";
        break;
    }
    DiffDirtyEntries(ProcessType::kRemote,
                     begin_image_ptr,
                     RegionCommon<T>::remote_contents_,
                     base_ptr);
    // Print shared dirty after since it's less important.
    if (RegionCommon<T>::GetZygoteDirtyEntryCount() != 0) {
      // We only reach this point if both pids were specified. The entries in
      // this set differed between the zygote's mapping and the clean image
      // (the zygote itself dirtied them), hence "probably shared dirty".
      CHECK(remotes == RemoteProcesses::kImageAndZygote);
      os_ << "\n" << "  Zygote dirty entries (probably shared dirty): ";
      DiffDirtyEntries(ProcessType::kZygote,
                       begin_image_ptr,
                       RegionCommon<T>::zygote_contents_,
                       begin_image_ptr);
    }
    RegionSpecializedBase<T>::DumpDirtyEntries();
    RegionSpecializedBase<T>::DumpFalseDirtyEntries();
    RegionSpecializedBase<T>::DumpCleanEntries();
  }
793
794 private:
795 std::ostream& os_;
796
  // Diff and print the contents of each dirty entry for |process_type|,
  // comparing the local bytes at |base_ptr| against the copied |contents|.
  // NOTE(review): the count printed here is dirty_entries_.size(), but the
  // loop iterates image_dirty_entries_ / zygote_dirty_entries_ — confirm the
  // printed count is the intended one.
  void DiffDirtyEntries(ProcessType process_type,
                        const uint8_t* begin_image_ptr,
                        std::vector<uint8_t>* contents,
                        const uint8_t* base_ptr)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    os_ << RegionCommon<T>::dirty_entries_.size() << "\n";
    const std::set<T*>& entries =
        (process_type == ProcessType::kZygote) ?
            RegionCommon<T>::zygote_dirty_entries_:
            RegionCommon<T>::image_dirty_entries_;
    for (T* entry : entries) {
      uint8_t* entry_bytes = reinterpret_cast<uint8_t*>(entry);
      // Offset of this entry within the image; valid in every copy.
      ptrdiff_t offset = entry_bytes - begin_image_ptr;
      uint8_t* remote_bytes = &(*contents)[offset];
      RegionSpecializedBase<T>::DiffEntryContents(entry, remote_bytes, &base_ptr[offset]);
    }
  }
814
815 void ComputeEntryDirty(T* entry,
816 const uint8_t* begin_image_ptr,
817 const std::set<size_t>& dirty_pages)
818 REQUIRES_SHARED(Locks::mutator_lock_) {
819 // Set up pointers in the remote and the zygote for comparison.
820 uint8_t* current = reinterpret_cast<uint8_t*>(entry);
821 ptrdiff_t offset = current - begin_image_ptr;
822 T* entry_remote =
823 reinterpret_cast<T*>(const_cast<uint8_t*>(&(*RegionCommon<T>::remote_contents_)[offset]));
Mathieu Chartier51e79652017-07-24 15:43:38 -0700824 const bool have_zygote = !RegionCommon<T>::zygote_contents_->empty();
David Sehrb4005f02017-06-20 19:11:40 -0700825 const uint8_t* current_zygote =
Mathieu Chartier51e79652017-07-24 15:43:38 -0700826 have_zygote ? &(*RegionCommon<T>::zygote_contents_)[offset] : nullptr;
David Sehrb4005f02017-06-20 19:11:40 -0700827 T* entry_zygote = reinterpret_cast<T*>(const_cast<uint8_t*>(current_zygote));
828 // Visit and classify entries at the current location.
829 RegionSpecializedBase<T>::VisitEntry(entry);
Mathieu Chartier51e79652017-07-24 15:43:38 -0700830
831 // Test private dirty first.
832 bool is_dirty = false;
833 if (have_zygote) {
834 bool private_dirty = EntriesDiffer(entry_zygote, entry_remote);
835 if (private_dirty) {
836 // Private dirty, app vs zygote.
837 is_dirty = true;
David Sehrb4005f02017-06-20 19:11:40 -0700838 RegionCommon<T>::AddImageDirtyEntry(entry);
David Sehrb4005f02017-06-20 19:11:40 -0700839 }
Mathieu Chartier51e79652017-07-24 15:43:38 -0700840 if (EntriesDiffer(entry_zygote, entry)) {
841 // Shared dirty, zygote vs image.
842 is_dirty = true;
843 RegionCommon<T>::AddZygoteDirtyEntry(entry);
844 }
845 } else if (EntriesDiffer(entry_remote, entry)) {
846 // Shared or private dirty, app vs image.
847 is_dirty = true;
848 RegionCommon<T>::AddImageDirtyEntry(entry);
849 }
850 if (is_dirty) {
851 // TODO: Add support dirty entries in zygote and image.
852 RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
David Sehrb4005f02017-06-20 19:11:40 -0700853 } else {
854 RegionSpecializedBase<T>::AddCleanEntry(entry);
Mathieu Chartier51e79652017-07-24 15:43:38 -0700855 if (RegionCommon<T>::IsEntryOnDirtyPage(entry, dirty_pages)) {
856 // This entry was either never mutated or got mutated back to the same value.
857 // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
858 RegionSpecializedBase<T>::AddFalseDirtyEntry(entry);
859 }
David Sehrb4005f02017-06-20 19:11:40 -0700860 }
861 }
862
863 DISALLOW_COPY_AND_ASSIGN(RegionData);
864};
865
866} // namespace
867
868
// Compares this process's clean copy of a boot image against the same mapping
// inside a remote process (and optionally the zygote), reporting byte/page
// level dirtiness statistics and per-entry diffs.
class ImgDiagDumper {
 public:
  // `os` receives all report output. `image_header` and `image_location`
  // describe the locally-loaded boot image. `image_diff_pid` and
  // `zygote_diff_pid` select the remote process(es) to diff against; a
  // negative pid disables that comparison.
  explicit ImgDiagDumper(std::ostream* os,
                         const ImageHeader& image_header,
                         const std::string& image_location,
                         pid_t image_diff_pid,
                         pid_t zygote_diff_pid)
      : os_(os),
        image_header_(image_header),
        image_location_(image_location),
        image_diff_pid_(image_diff_pid),
        zygote_diff_pid_(zygote_diff_pid),
        zygote_pid_only_(false) {}

  // Opens all of the /proc files needed for the diff and reads the remote
  // (and optional zygote) copies of the boot image mapping into memory.
  // Returns false (after writing a message to os_) on any failure. All
  // resources are staged in temporaries and only committed to members at the
  // end, so a failed Init() leaves the dumper unchanged.
  bool Init() {
    std::ostream& os = *os_;

    if (image_diff_pid_ < 0 && zygote_diff_pid_ < 0) {
      os << "Either --image-diff-pid or --zygote-diff-pid (or both) must be specified.\n";
      return false;
    }

    // To avoid the combinations of command-line argument use cases:
    // If the user invoked with only --zygote-diff-pid, shuffle that to
    // image_diff_pid_, invalidate zygote_diff_pid_, and remember that
    // image_diff_pid_ is now special.
    if (image_diff_pid_ < 0) {
      image_diff_pid_ = zygote_diff_pid_;
      zygote_diff_pid_ = -1;
      zygote_pid_only_ = true;
    }

    // Verify the target process exists before opening any of its /proc files.
    {
      struct stat sts;
      std::string proc_pid_str =
          StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
      if (stat(proc_pid_str.c_str(), &sts) == -1) {
        os << "Process does not exist";
        return false;
      }
    }

    // Open /proc/$pid/maps to view memory maps
    auto tmp_proc_maps = std::unique_ptr<BacktraceMap>(BacktraceMap::Create(image_diff_pid_));
    if (tmp_proc_maps == nullptr) {
      os << "Could not read backtrace maps";
      return false;
    }

    bool found_boot_map = false;
    // Find the memory map only for boot.art
    for (const backtrace_map_t& map : *tmp_proc_maps) {
      if (EndsWith(map.name, GetImageLocationBaseName())) {
        if ((map.flags & PROT_WRITE) != 0) {
          boot_map_ = map;
          found_boot_map = true;
          break;
        }
        // In actuality there's more than 1 map, but the second one is read-only.
        // The one we care about is the write-able map.
        // The readonly maps are guaranteed to be identical, so its not interesting to compare
        // them.
      }
    }

    if (!found_boot_map) {
      os << "Could not find map for " << GetImageLocationBaseName();
      return false;
    }
    // Sanity check boot_map_.
    CHECK(boot_map_.end >= boot_map_.start);
    boot_map_size_ = boot_map_.end - boot_map_.start;

    // Open /proc/<image_diff_pid_>/mem and read as remote_contents_.
    std::string image_file_name =
        StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
    auto image_map_file = std::unique_ptr<File>(OS::OpenFileForReading(image_file_name.c_str()));
    if (image_map_file == nullptr) {
      os << "Failed to open " << image_file_name << " for reading";
      return false;
    }
    std::vector<uint8_t> tmp_remote_contents(boot_map_size_);
    if (!image_map_file->PreadFully(&tmp_remote_contents[0], boot_map_size_, boot_map_.start)) {
      os << "Could not fully read file " << image_file_name;
      return false;
    }

    // If zygote_diff_pid_ != -1, open /proc/<zygote_diff_pid_>/mem and read as zygote_contents_.
    std::vector<uint8_t> tmp_zygote_contents;
    if (zygote_diff_pid_ != -1) {
      std::string zygote_file_name =
          StringPrintf("/proc/%ld/mem", static_cast<long>(zygote_diff_pid_));  // NOLINT [runtime/int]
      std::unique_ptr<File> zygote_map_file(OS::OpenFileForReading(zygote_file_name.c_str()));
      if (zygote_map_file == nullptr) {
        os << "Failed to open " << zygote_file_name << " for reading";
        return false;
      }
      // The boot map should be at the same address.
      tmp_zygote_contents.resize(boot_map_size_);
      if (!zygote_map_file->PreadFully(&tmp_zygote_contents[0], boot_map_size_, boot_map_.start)) {
        LOG(WARNING) << "Could not fully read zygote file " << zygote_file_name;
        return false;
      }
    }

    // Open /proc/<image_diff_pid_>/pagemap.
    std::string pagemap_file_name = StringPrintf(
        "/proc/%ld/pagemap", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
    auto tmp_pagemap_file =
        std::unique_ptr<File>(OS::OpenFileForReading(pagemap_file_name.c_str()));
    if (tmp_pagemap_file == nullptr) {
      os << "Failed to open " << pagemap_file_name << " for reading: " << strerror(errno);
      return false;
    }

    // Not truly clean, mmap-ing boot.art again would be more pristine, but close enough
    const char* clean_pagemap_file_name = "/proc/self/pagemap";
    auto tmp_clean_pagemap_file = std::unique_ptr<File>(
        OS::OpenFileForReading(clean_pagemap_file_name));
    if (tmp_clean_pagemap_file == nullptr) {
      os << "Failed to open " << clean_pagemap_file_name << " for reading: " << strerror(errno);
      return false;
    }

    auto tmp_kpageflags_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpageflags"));
    if (tmp_kpageflags_file == nullptr) {
      os << "Failed to open /proc/kpageflags for reading: " << strerror(errno);
      return false;
    }

    auto tmp_kpagecount_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpagecount"));
    if (tmp_kpagecount_file == nullptr) {
      os << "Failed to open /proc/kpagecount for reading:" << strerror(errno);
      return false;
    }

    // Commit the mappings, etc.
    proc_maps_ = std::move(tmp_proc_maps);
    remote_contents_ = std::move(tmp_remote_contents);
    zygote_contents_ = std::move(tmp_zygote_contents);
    pagemap_file_ = std::move(*tmp_pagemap_file.release());
    clean_pagemap_file_ = std::move(*tmp_clean_pagemap_file.release());
    kpageflags_file_ = std::move(*tmp_kpageflags_file.release());
    kpagecount_file_ = std::move(*tmp_kpagecount_file.release());

    return true;
  }

  // Prints the image header summary and, when any diff pid was supplied, the
  // full diff report. Returns false if the diff itself failed.
  bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
    std::ostream& os = *os_;
    os << "IMAGE LOCATION: " << image_location_ << "\n\n";

    os << "MAGIC: " << image_header_.GetMagic() << "\n\n";

    os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n";

    PrintPidLine("IMAGE", image_diff_pid_);
    os << "\n\n";
    PrintPidLine("ZYGOTE", zygote_diff_pid_);
    bool ret = true;
    if (image_diff_pid_ >= 0 || zygote_diff_pid_ >= 0) {
      ret = DumpImageDiff();
      os << "\n\n";
    }

    os << std::flush;

    return ret;
  }

 private:
  // Currently just forwards to DumpImageDiffMap().
  bool DumpImageDiff()
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return DumpImageDiffMap();
  }

  // Walks the boot image mapping, comparing local vs remote contents page by
  // page and byte by byte, and accumulates the results into *mapping_data.
  // `image_begin` is the page-aligned local image start. Prints the low-level
  // statistics to os_; returns false if reading page metadata failed.
  bool ComputeDirtyBytes(const uint8_t* image_begin, MappingData* mapping_data /*out*/) {
    std::ostream& os = *os_;

    size_t virtual_page_idx = 0;  // Virtual page number (for an absolute memory address)
    size_t page_idx = 0;  // Page index relative to 0
    size_t previous_page_idx = 0;  // Previous page index relative to 0


    // Iterate through one page at a time. Boot map begin/end already implicitly aligned.
    for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; begin += kPageSize) {
      ptrdiff_t offset = begin - boot_map_.start;

      // We treat the image header as part of the memory map for now
      // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
      // But it might still be interesting to see if any of the ImageHeader data mutated
      const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
      uint8_t* remote_ptr = &remote_contents_[offset];

      if (memcmp(local_ptr, remote_ptr, kPageSize) != 0) {
        mapping_data->different_pages++;

        // Count the number of 32-bit integers that are different.
        for (size_t i = 0; i < kPageSize / sizeof(uint32_t); ++i) {
          uint32_t* remote_ptr_int32 = reinterpret_cast<uint32_t*>(remote_ptr);
          const uint32_t* local_ptr_int32 = reinterpret_cast<const uint32_t*>(local_ptr);

          if (remote_ptr_int32[i] != local_ptr_int32[i]) {
            mapping_data->different_int32s++;
          }
        }
      }
    }

    std::vector<size_t> private_dirty_pages_for_section(ImageHeader::kSectionCount, 0u);

    // Iterate through one byte at a time.
    ptrdiff_t page_off_begin = image_header_.GetImageBegin() - image_begin;
    for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; ++begin) {
      previous_page_idx = page_idx;
      ptrdiff_t offset = begin - boot_map_.start;

      // We treat the image header as part of the memory map for now
      // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
      // But it might still be interesting to see if any of the ImageHeader data mutated
      const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
      uint8_t* remote_ptr = &remote_contents_[offset];

      virtual_page_idx = reinterpret_cast<uintptr_t>(local_ptr) / kPageSize;

      // Calculate the page index, relative to the 0th page where the image begins
      page_idx = (offset + page_off_begin) / kPageSize;
      if (*local_ptr != *remote_ptr) {
        // Track number of bytes that are different
        mapping_data->different_bytes++;
      }

      // Independently count the # of dirty pages on the remote side
      size_t remote_virtual_page_idx = begin / kPageSize;
      // NOTE(review): dirtiness is only sampled when page_idx changes, and
      // previous_page_idx/page_idx both start out equal on the first byte, so
      // the very first page of the mapping appears to be skipped — confirm
      // this is intended.
      if (previous_page_idx != page_idx) {
        uint64_t page_count = 0xC0FFEE;
        // TODO: virtual_page_idx needs to be from the same process
        std::string error_msg;
        int dirtiness = (IsPageDirty(&pagemap_file_,  // Image-diff-pid procmap
                                     &clean_pagemap_file_,  // Self procmap
                                     &kpageflags_file_,
                                     &kpagecount_file_,
                                     remote_virtual_page_idx,  // potentially "dirty" page
                                     virtual_page_idx,  // true "clean" page
                                     &page_count,
                                     &error_msg));
        if (dirtiness < 0) {
          os << error_msg;
          return false;
        } else if (dirtiness > 0) {
          mapping_data->dirty_pages++;
          mapping_data->dirty_page_set.insert(mapping_data->dirty_page_set.end(), virtual_page_idx);
        }

        bool is_dirty = dirtiness > 0;
        bool is_private = page_count == 1;

        if (page_count == 1) {
          mapping_data->private_pages++;
        }

        if (is_dirty && is_private) {
          // Attribute the private dirty page to every image section containing
          // this offset.
          mapping_data->private_dirty_pages++;
          for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
            const ImageHeader::ImageSections section = static_cast<ImageHeader::ImageSections>(i);
            if (image_header_.GetImageSection(section).Contains(offset)) {
              ++private_dirty_pages_for_section[i];
            }
          }
        }
      }
    }
    // Pages that the kernel reports dirty but whose contents still match are
    // "false dirty" (mutated and restored, or touched without a real change).
    mapping_data->false_dirty_pages = mapping_data->dirty_pages - mapping_data->different_pages;
    // Print low-level (bytes, int32s, pages) statistics.
    os << mapping_data->different_bytes << " differing bytes,\n "
       << mapping_data->different_int32s << " differing int32s,\n "
       << mapping_data->different_pages << " differing pages,\n "
       << mapping_data->dirty_pages << " pages are dirty;\n "
       << mapping_data->false_dirty_pages << " pages are false dirty;\n "
       << mapping_data->private_pages << " pages are private;\n "
       << mapping_data->private_dirty_pages << " pages are Private_Dirty\n "
       << "\n";

    size_t total_private_dirty_pages = std::accumulate(private_dirty_pages_for_section.begin(),
                                                       private_dirty_pages_for_section.end(),
                                                       0u);
    os << "Image sections (total private dirty pages " << total_private_dirty_pages << ")\n";
    for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
      const ImageHeader::ImageSections section = static_cast<ImageHeader::ImageSections>(i);
      os << section << " " << image_header_.GetImageSection(section)
         << " private dirty pages=" << private_dirty_pages_for_section[i] << "\n";
    }
    os << "\n";

    return true;
  }

  // Look at /proc/$pid/mem and only diff the things from there
  bool DumpImageDiffMap()
      REQUIRES_SHARED(Locks::mutator_lock_) {
    std::ostream& os = *os_;
    std::string error_msg;

    // Walk the bytes and diff against our boot image
    os << "\nObserving boot image header at address "
       << reinterpret_cast<const void*>(&image_header_)
       << "\n\n";

    const uint8_t* image_begin_unaligned = image_header_.GetImageBegin();
    const uint8_t* image_mirror_end_unaligned = image_begin_unaligned +
        image_header_.GetImageSection(ImageHeader::kSectionObjects).Size();
    const uint8_t* image_end_unaligned = image_begin_unaligned + image_header_.GetImageSize();

    // Adjust range to nearest page
    const uint8_t* image_begin = AlignDown(image_begin_unaligned, kPageSize);
    const uint8_t* image_end = AlignUp(image_end_unaligned, kPageSize);

    if (reinterpret_cast<uintptr_t>(image_begin) > boot_map_.start ||
        reinterpret_cast<uintptr_t>(image_end) < boot_map_.end) {
      // Sanity check that we aren't trying to read a completely different boot image
      os << "Remote boot map is out of range of local boot map: " <<
        "local begin " << reinterpret_cast<const void*>(image_begin) <<
        ", local end " << reinterpret_cast<const void*>(image_end) <<
        ", remote begin " << reinterpret_cast<const void*>(boot_map_.start) <<
        ", remote end " << reinterpret_cast<const void*>(boot_map_.end);
      return false;
      // If we wanted even more validation we could map the ImageHeader from the file
    }

    MappingData mapping_data;

    os << "Mapping at [" << reinterpret_cast<void*>(boot_map_.start) << ", "
       << reinterpret_cast<void*>(boot_map_.end) << ") had:\n ";
    if (!ComputeDirtyBytes(image_begin, &mapping_data)) {
      return false;
    }

    // Diff the object entries within the mirror (object) section of the image.
    RegionData<mirror::Object> object_region_data(os_,
                                                  &remote_contents_,
                                                  &zygote_contents_,
                                                  boot_map_,
                                                  image_header_);

    RemoteProcesses remotes;
    if (zygote_pid_only_) {
      remotes = RemoteProcesses::kZygoteOnly;
    } else if (zygote_diff_pid_ > 0) {
      remotes = RemoteProcesses::kImageAndZygote;
    } else {
      remotes = RemoteProcesses::kImageOnly;
    }

    object_region_data.ProcessRegion(mapping_data,
                                     remotes,
                                     image_begin_unaligned,
                                     image_mirror_end_unaligned);

    return true;
  }

  // Reads one 64-bit entry from a /proc/$pid/pagemap file and extracts the
  // physical page frame number for `virtual_page_index` into
  // *page_frame_number. Returns false and sets *error_msg on read failure.
  static bool GetPageFrameNumber(File* page_map_file,
                                 size_t virtual_page_index,
                                 uint64_t* page_frame_number,
                                 std::string* error_msg) {
    CHECK(page_map_file != nullptr);
    CHECK(page_frame_number != nullptr);
    CHECK(error_msg != nullptr);

    constexpr size_t kPageMapEntrySize = sizeof(uint64_t);
    constexpr uint64_t kPageFrameNumberMask = (1ULL << 55) - 1;  // bits 0-54 [in /proc/$pid/pagemap]
    constexpr uint64_t kPageSoftDirtyMask = (1ULL << 55);  // bit 55 [in /proc/$pid/pagemap]

    uint64_t page_map_entry = 0;

    // Read 64-bit entry from /proc/$pid/pagemap to get the physical page frame number
    if (!page_map_file->PreadFully(&page_map_entry, kPageMapEntrySize,
                                   virtual_page_index * kPageMapEntrySize)) {
      *error_msg = StringPrintf("Failed to read the virtual page index entry from %s",
                                page_map_file->GetPath().c_str());
      return false;
    }

    // TODO: seems useless, remove this.
    bool soft_dirty = (page_map_entry & kPageSoftDirtyMask) != 0;
    if ((false)) {
      LOG(VERBOSE) << soft_dirty;  // Suppress unused warning
      UNREACHABLE();
    }

    *page_frame_number = page_map_entry & kPageFrameNumberMask;

    return true;
  }

  // Compares the physical page backing `virtual_page_idx` in the remote
  // process against the one backing `clean_virtual_page_idx` in this process.
  // Returns 1 if the remote page diverged (dirty), 0 if both map the same
  // frame (clean), or -1 on error (with *error_msg set). *page_count receives
  // the frame's mapping count from /proc/kpagecount (1 == private).
  static int IsPageDirty(File* page_map_file,
                         File* clean_pagemap_file,
                         File* kpageflags_file,
                         File* kpagecount_file,
                         size_t virtual_page_idx,
                         size_t clean_virtual_page_idx,
                         // Out parameters:
                         uint64_t* page_count, std::string* error_msg) {
    CHECK(page_map_file != nullptr);
    CHECK(clean_pagemap_file != nullptr);
    CHECK_NE(page_map_file, clean_pagemap_file);
    CHECK(kpageflags_file != nullptr);
    CHECK(kpagecount_file != nullptr);
    CHECK(page_count != nullptr);
    CHECK(error_msg != nullptr);

    // Constants are from https://www.kernel.org/doc/Documentation/vm/pagemap.txt

    constexpr size_t kPageFlagsEntrySize = sizeof(uint64_t);
    constexpr size_t kPageCountEntrySize = sizeof(uint64_t);
    constexpr uint64_t kPageFlagsDirtyMask = (1ULL << 4);  // in /proc/kpageflags
    constexpr uint64_t kPageFlagsNoPageMask = (1ULL << 20);  // in /proc/kpageflags
    constexpr uint64_t kPageFlagsMmapMask = (1ULL << 11);  // in /proc/kpageflags

    uint64_t page_frame_number = 0;
    if (!GetPageFrameNumber(page_map_file, virtual_page_idx, &page_frame_number, error_msg)) {
      return -1;
    }

    uint64_t page_frame_number_clean = 0;
    if (!GetPageFrameNumber(clean_pagemap_file, clean_virtual_page_idx, &page_frame_number_clean,
                            error_msg)) {
      return -1;
    }

    // Read 64-bit entry from /proc/kpageflags to get the dirty bit for a page
    uint64_t kpage_flags_entry = 0;
    if (!kpageflags_file->PreadFully(&kpage_flags_entry,
                                     kPageFlagsEntrySize,
                                     page_frame_number * kPageFlagsEntrySize)) {
      *error_msg = StringPrintf("Failed to read the page flags from %s",
                                kpageflags_file->GetPath().c_str());
      return -1;
    }

    // Read 64-bit entry from /proc/kpagecount to get mapping counts for a page
    if (!kpagecount_file->PreadFully(page_count /*out*/,
                                     kPageCountEntrySize,
                                     page_frame_number * kPageCountEntrySize)) {
      *error_msg = StringPrintf("Failed to read the page count from %s",
                                kpagecount_file->GetPath().c_str());
      return -1;
    }

    // There must be a page frame at the requested address.
    CHECK_EQ(kpage_flags_entry & kPageFlagsNoPageMask, 0u);
    // The page frame must be memory mapped
    CHECK_NE(kpage_flags_entry & kPageFlagsMmapMask, 0u);

    // Page is dirty, i.e. has diverged from file, if the 4th bit is set to 1
    bool flags_dirty = (kpage_flags_entry & kPageFlagsDirtyMask) != 0;

    // page_frame_number_clean must come from the *same* process
    // but a *different* mmap than page_frame_number
    if (flags_dirty) {
      CHECK_NE(page_frame_number, page_frame_number_clean);
    }

    return page_frame_number != page_frame_number_clean;
  }

  // Prints "<kind> DIFF PID ..." for the report header; negative pids print
  // as disabled.
  void PrintPidLine(const std::string& kind, pid_t pid) {
    if (pid < 0) {
      *os_ << kind << " DIFF PID: disabled\n\n";
    } else {
      *os_ << kind << " DIFF PID (" << pid << "): ";
    }
  }

  // Returns true if `str` ends with `suffix`.
  static bool EndsWith(const std::string& str, const std::string& suffix) {
    return str.size() >= suffix.size() &&
           str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
  }

  // Return suffix of the file path after the last /. (e.g. /foo/bar -> bar, bar -> bar)
  static std::string BaseName(const std::string& str) {
    size_t idx = str.rfind('/');
    if (idx == std::string::npos) {
      return str;
    }

    return str.substr(idx + 1);
  }

  // Return the image location, stripped of any directories, e.g. "boot.art" or "core.art"
  std::string GetImageLocationBaseName() const {
    return BaseName(std::string(image_location_));
  }

  std::ostream* os_;
  const ImageHeader& image_header_;
  const std::string image_location_;
  pid_t image_diff_pid_;  // Dump image diff against boot.art if pid is non-negative
  pid_t zygote_diff_pid_;  // Dump image diff against zygote boot.art if pid is non-negative
  bool zygote_pid_only_;  // The user only specified a pid for the zygote.

  // BacktraceMap used for finding the memory mapping of the image file.
  std::unique_ptr<BacktraceMap> proc_maps_;
  // Boot image mapping.
  backtrace_map_t boot_map_{};  // NOLINT
  // The size of the boot image mapping.
  size_t boot_map_size_;
  // The bytes of the remote boot image mapping, read from /proc/<image_diff_pid_>/mem.
  std::vector<uint8_t> remote_contents_;
  // The bytes of the zygote's boot image mapping, read from /proc/<zygote_diff_pid_>/mem
  // (empty when no zygote pid was given).
  std::vector<uint8_t> zygote_contents_;
  // A File for reading /proc/<image_diff_pid_>/pagemap.
  File pagemap_file_;
  // A File for reading /proc/self/pagemap.
  File clean_pagemap_file_;
  // A File for reading /proc/kpageflags.
  File kpageflags_file_;
  // A File for reading /proc/kpagecount.
  File kpagecount_file_;

  DISALLOW_COPY_AND_ASSIGN(ImgDiagDumper);
};
1390
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001391static int DumpImage(Runtime* runtime,
1392 std::ostream* os,
1393 pid_t image_diff_pid,
1394 pid_t zygote_diff_pid) {
Igor Murashkin37743352014-11-13 14:38:00 -08001395 ScopedObjectAccess soa(Thread::Current());
1396 gc::Heap* heap = runtime->GetHeap();
Jeff Haodcdc85b2015-12-04 14:06:18 -08001397 std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
1398 CHECK(!image_spaces.empty());
1399 for (gc::space::ImageSpace* image_space : image_spaces) {
1400 const ImageHeader& image_header = image_space->GetImageHeader();
1401 if (!image_header.IsValid()) {
1402 fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
1403 return EXIT_FAILURE;
1404 }
1405
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001406 ImgDiagDumper img_diag_dumper(os,
1407 image_header,
1408 image_space->GetImageLocation(),
1409 image_diff_pid,
1410 zygote_diff_pid);
David Sehr50005a02017-06-21 13:24:21 -07001411 if (!img_diag_dumper.Init()) {
1412 return EXIT_FAILURE;
1413 }
Jeff Haodcdc85b2015-12-04 14:06:18 -08001414 if (!img_diag_dumper.Dump()) {
1415 return EXIT_FAILURE;
1416 }
Igor Murashkin37743352014-11-13 14:38:00 -08001417 }
Jeff Haodcdc85b2015-12-04 14:06:18 -08001418 return EXIT_SUCCESS;
Igor Murashkin37743352014-11-13 14:38:00 -08001419}
1420
1421struct ImgDiagArgs : public CmdlineArgs {
1422 protected:
1423 using Base = CmdlineArgs;
1424
1425 virtual ParseStatus ParseCustom(const StringPiece& option,
1426 std::string* error_msg) OVERRIDE {
1427 {
1428 ParseStatus base_parse = Base::ParseCustom(option, error_msg);
1429 if (base_parse != kParseUnknownArgument) {
1430 return base_parse;
1431 }
1432 }
1433
1434 if (option.starts_with("--image-diff-pid=")) {
1435 const char* image_diff_pid = option.substr(strlen("--image-diff-pid=")).data();
1436
1437 if (!ParseInt(image_diff_pid, &image_diff_pid_)) {
1438 *error_msg = "Image diff pid out of range";
1439 return kParseError;
1440 }
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001441 } else if (option.starts_with("--zygote-diff-pid=")) {
1442 const char* zygote_diff_pid = option.substr(strlen("--zygote-diff-pid=")).data();
1443
1444 if (!ParseInt(zygote_diff_pid, &zygote_diff_pid_)) {
1445 *error_msg = "Zygote diff pid out of range";
1446 return kParseError;
1447 }
Igor Murashkin37743352014-11-13 14:38:00 -08001448 } else {
1449 return kParseUnknownArgument;
1450 }
1451
1452 return kParseOk;
1453 }
1454
1455 virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
1456 // Perform the parent checks.
1457 ParseStatus parent_checks = Base::ParseChecks(error_msg);
1458 if (parent_checks != kParseOk) {
1459 return parent_checks;
1460 }
1461
1462 // Perform our own checks.
1463
1464 if (kill(image_diff_pid_,
1465 /*sig*/0) != 0) { // No signal is sent, perform error-checking only.
1466 // Check if the pid exists before proceeding.
1467 if (errno == ESRCH) {
1468 *error_msg = "Process specified does not exist";
1469 } else {
1470 *error_msg = StringPrintf("Failed to check process status: %s", strerror(errno));
1471 }
1472 return kParseError;
1473 } else if (instruction_set_ != kRuntimeISA) {
1474 // Don't allow different ISAs since the images are ISA-specific.
1475 // Right now the code assumes both the runtime ISA and the remote ISA are identical.
1476 *error_msg = "Must use the default runtime ISA; changing ISA is not supported.";
1477 return kParseError;
1478 }
1479
1480 return kParseOk;
1481 }
1482
1483 virtual std::string GetUsage() const {
1484 std::string usage;
1485
1486 usage +=
1487 "Usage: imgdiag [options] ...\n"
1488 " Example: imgdiag --image-diff-pid=$(pidof dex2oat)\n"
1489 " Example: adb shell imgdiag --image-diff-pid=$(pid zygote)\n"
1490 "\n";
1491
1492 usage += Base::GetUsage();
1493
1494 usage += // Optional.
1495 " --image-diff-pid=<pid>: provide the PID of a process whose boot.art you want to diff.\n"
1496 " Example: --image-diff-pid=$(pid zygote)\n"
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001497 " --zygote-diff-pid=<pid>: provide the PID of the zygote whose boot.art you want to diff "
1498 "against.\n"
1499 " Example: --zygote-diff-pid=$(pid zygote)\n"
Igor Murashkin37743352014-11-13 14:38:00 -08001500 "\n";
1501
1502 return usage;
1503 }
1504
1505 public:
1506 pid_t image_diff_pid_ = -1;
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001507 pid_t zygote_diff_pid_ = -1;
Igor Murashkin37743352014-11-13 14:38:00 -08001508};
1509
1510struct ImgDiagMain : public CmdlineMain<ImgDiagArgs> {
1511 virtual bool ExecuteWithRuntime(Runtime* runtime) {
1512 CHECK(args_ != nullptr);
1513
1514 return DumpImage(runtime,
Igor Murashkin37743352014-11-13 14:38:00 -08001515 args_->os_,
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001516 args_->image_diff_pid_,
1517 args_->zygote_diff_pid_) == EXIT_SUCCESS;
Igor Murashkin37743352014-11-13 14:38:00 -08001518 }
1519};
1520
1521} // namespace art
1522
1523int main(int argc, char** argv) {
1524 art::ImgDiagMain main;
1525 return main.Main(argc, argv);
1526}