/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
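
// Overview: imgdiag is a diagnostic tool for boot image (boot.art) memory usage. It maps the
// boot image locally, reads the same mapping out of a remote process (and optionally out of
// the zygote) via /proc/<pid>/mem, and uses /proc/<pid>/pagemap, /proc/kpageflags and
// /proc/kpagecount to report which pages are dirty, private, or falsely dirty, along with
// per-object and per-ArtMethod diffs of the entries that actually changed.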

#include <stdio.h>
#include <stdlib.h>

#include <fstream>
#include <functional>
#include <iostream>
#include <string>
#include <vector>
#include <set>
#include <map>
#include <unordered_set>

#include "android-base/stringprintf.h"

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
#include "gc/space/image_space.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "image.h"
#include "scoped_thread_state_change-inl.h"
#include "os.h"

#include "cmdline.h"
#include "backtrace/BacktraceMap.h"

#include <sys/stat.h>
#include <sys/types.h>
#include <signal.h>

namespace art {

using android::base::StringPrintf;

namespace {

constexpr size_t kMaxAddressPrint = 5;

enum class ProcessType {
  kZygote,
  kRemote
};

enum class RemoteProcesses {
  kImageOnly,
  kZygoteOnly,
  kImageAndZygote
};

struct MappingData {
  // The count of pages that are considered dirty by the OS.
  size_t dirty_pages = 0;
  // The count of pages that differ by at least one byte.
  size_t different_pages = 0;
  // The count of differing bytes.
  size_t different_bytes = 0;
  // The count of differing four-byte units.
  size_t different_int32s = 0;
  // The count of pages that have mapping count == 1.
  size_t private_pages = 0;
  // The count of private pages that are also dirty.
  size_t private_dirty_pages = 0;
  // The count of pages that are marked dirty but do not differ.
  size_t false_dirty_pages = 0;
  // Set of the local virtual page indices that are dirty.
  std::set<size_t> dirty_page_set;
};
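
// Note: ComputeDirtyBytes() below fills this struct in; false_dirty_pages is derived there as
// dirty_pages - different_pages, i.e. pages the kernel reports as dirty even though their
// contents still match the local image.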

static std::string GetClassDescriptor(mirror::Class* klass)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  CHECK(klass != nullptr);

  std::string descriptor;
  const char* descriptor_str = klass->GetDescriptor(&descriptor /*out*/);

  return std::string(descriptor_str);
}

static std::string PrettyFieldValue(ArtField* field, mirror::Object* object)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  std::ostringstream oss;
  switch (field->GetTypeAsPrimitiveType()) {
    case Primitive::kPrimNot: {
      oss << object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
          field->GetOffset());
      break;
    }
    case Primitive::kPrimBoolean: {
      oss << static_cast<bool>(object->GetFieldBoolean<kVerifyNone>(field->GetOffset()));
      break;
    }
    case Primitive::kPrimByte: {
      oss << static_cast<int32_t>(object->GetFieldByte<kVerifyNone>(field->GetOffset()));
      break;
    }
    case Primitive::kPrimChar: {
      oss << object->GetFieldChar<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimShort: {
      oss << object->GetFieldShort<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimInt: {
      oss << object->GetField32<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimLong: {
      oss << object->GetField64<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimFloat: {
      oss << object->GetField32<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimDouble: {
      oss << object->GetField64<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimVoid: {
      oss << "void";
      break;
    }
  }
  return oss.str();
}

template <typename K, typename V, typename D>
static std::vector<std::pair<V, K>> SortByValueDesc(
    const std::map<K, D> map,
    std::function<V(const D&)> value_mapper = [](const D& d) { return static_cast<V>(d); }) {
  // Store value->key so that we can use the default sort from pair which
  // sorts by value first and then key
  std::vector<std::pair<V, K>> value_key_vector;

  for (const auto& kv_pair : map) {
    value_key_vector.push_back(std::make_pair(value_mapper(kv_pair.second), kv_pair.first));
  }

  // Sort in reverse (descending order)
  std::sort(value_key_vector.rbegin(), value_key_vector.rend());
  return value_key_vector;
}
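
// For example, the per-class dirty-object histogram further below is built with:
//   SortByValueDesc<mirror::Class*, size_t, ClassData>(
//       class_data_, [](const ClassData& d) { return d.dirty_object_count; });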

// Fixup a remote pointer that we read from a foreign boot.art to point to our own memory.
// Returned pointer will point to inside of remote_contents.
template <typename T>
static T* FixUpRemotePointer(T* remote_ptr,
                             std::vector<uint8_t>& remote_contents,
                             const backtrace_map_t& boot_map) {
  if (remote_ptr == nullptr) {
    return nullptr;
  }

  uintptr_t remote = reinterpret_cast<uintptr_t>(remote_ptr);

  CHECK_LE(boot_map.start, remote);
  CHECK_GT(boot_map.end, remote);

  off_t boot_offset = remote - boot_map.start;

  return reinterpret_cast<T*>(&remote_contents[boot_offset]);
}

template <typename T>
static T* RemoteContentsPointerToLocal(T* remote_ptr,
                                       std::vector<uint8_t>& remote_contents,
                                       const ImageHeader& image_header) {
  if (remote_ptr == nullptr) {
    return nullptr;
  }

  uint8_t* remote = reinterpret_cast<uint8_t*>(remote_ptr);
  ptrdiff_t boot_offset = remote - &remote_contents[0];

  const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header) + boot_offset;

  return reinterpret_cast<T*>(const_cast<uint8_t*>(local_ptr));
}

template <typename T> size_t EntrySize(T* entry);
template<> size_t EntrySize(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
  return object->SizeOf();
}
template<> size_t EntrySize(ArtMethod* art_method) REQUIRES_SHARED(Locks::mutator_lock_) {
  return sizeof(*art_method);
}

template <typename T>
static bool EntriesDiffer(T* entry1, T* entry2) REQUIRES_SHARED(Locks::mutator_lock_) {
  return memcmp(entry1, entry2, EntrySize(entry1)) != 0;
}

template <typename T>
struct RegionCommon {
 public:
  RegionCommon(std::ostream* os,
               std::vector<uint8_t>* remote_contents,
               std::vector<uint8_t>* zygote_contents,
               const backtrace_map_t& boot_map,
               const ImageHeader& image_header) :
    os_(*os),
    remote_contents_(remote_contents),
    zygote_contents_(zygote_contents),
    boot_map_(boot_map),
    image_header_(image_header),
    different_entries_(0),
    dirty_entry_bytes_(0),
    false_dirty_entry_bytes_(0) {
    CHECK(remote_contents != nullptr);
    CHECK(zygote_contents != nullptr);
  }

  void DumpSamplesAndOffsetCount() {
    os_ << " sample object addresses: ";
    for (size_t i = 0; i < dirty_entries_.size() && i < kMaxAddressPrint; ++i) {
      T* entry = dirty_entries_[i];
      os_ << reinterpret_cast<void*>(entry) << ", ";
    }
    os_ << "\n";
    os_ << " dirty byte +offset:count list = ";
    std::vector<std::pair<size_t, off_t>> field_dirty_count_sorted =
        SortByValueDesc<off_t, size_t, size_t>(field_dirty_count_);
    for (const std::pair<size_t, off_t>& pair : field_dirty_count_sorted) {
      off_t offset = pair.second;
      size_t count = pair.first;
      os_ << "+" << offset << ":" << count << ", ";
    }
    os_ << "\n";
  }

  size_t GetDifferentEntryCount() const { return different_entries_; }
  size_t GetDirtyEntryBytes() const { return dirty_entry_bytes_; }
  size_t GetFalseDirtyEntryCount() const { return false_dirty_entries_.size(); }
  size_t GetFalseDirtyEntryBytes() const { return false_dirty_entry_bytes_; }
  size_t GetZygoteDirtyEntryCount() const { return zygote_dirty_entries_.size(); }

 protected:
  bool IsEntryOnDirtyPage(T* entry, const std::set<size_t>& dirty_pages) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t size = EntrySize(entry);
    size_t page_off = 0;
    size_t current_page_idx;
    uintptr_t entry_address = reinterpret_cast<uintptr_t>(entry);
    // Iterate every page this entry belongs to
    do {
      current_page_idx = entry_address / kPageSize + page_off;
      if (dirty_pages.find(current_page_idx) != dirty_pages.end()) {
        // This entry is on a dirty page
        return true;
      }
      page_off++;
    } while ((current_page_idx * kPageSize) < RoundUp(entry_address + size, kObjectAlignment));
    return false;
  }

  void AddZygoteDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    zygote_dirty_entries_.insert(entry);
  }

  void AddImageDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    image_dirty_entries_.insert(entry);
  }

  void AddFalseDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    false_dirty_entries_.push_back(entry);
    false_dirty_entry_bytes_ += EntrySize(entry);
  }

  // The output stream to write to.
  std::ostream& os_;
  // The byte contents of the remote (image) process' image.
  std::vector<uint8_t>* remote_contents_;
  // The byte contents of the zygote process' image.
  std::vector<uint8_t>* zygote_contents_;
  const backtrace_map_t& boot_map_;
  const ImageHeader& image_header_;

  // Count of entries that are different.
  size_t different_entries_;

  // Local entries that are dirty (differ in at least one byte).
  size_t dirty_entry_bytes_;
  std::vector<T*> dirty_entries_;

  // Local entries that are clean, but located on dirty pages.
  size_t false_dirty_entry_bytes_;
  std::vector<T*> false_dirty_entries_;

  // Image dirty entries
  // If zygote_pid_only_ == true, these are shared dirty entries in the zygote.
  // If zygote_pid_only_ == false, these are private dirty entries in the application.
  std::set<T*> image_dirty_entries_;

  // Zygote dirty entries (probably private dirty).
  // We only add entries here if they differed in both the image and the zygote, so
  // they are probably private dirty.
  std::set<T*> zygote_dirty_entries_;

  std::map<off_t /* field offset */, size_t /* count */> field_dirty_count_;

 private:
  DISALLOW_COPY_AND_ASSIGN(RegionCommon);
};

template <typename T>
class RegionSpecializedBase : public RegionCommon<T> {
};
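
// The unspecialized template is never used directly; RegionData below relies on the
// specializations for mirror::Object and ArtMethod to supply the per-entry hooks
// (GetNextEntry, VisitEntry, AddDirtyEntry, DiffEntryContents, and the Dump* methods).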

// Region analysis for mirror::Objects
template<>
class RegionSpecializedBase<mirror::Object> : public RegionCommon<mirror::Object> {
 public:
  RegionSpecializedBase(std::ostream* os,
                        std::vector<uint8_t>* remote_contents,
                        std::vector<uint8_t>* zygote_contents,
                        const backtrace_map_t& boot_map,
                        const ImageHeader& image_header) :
    RegionCommon<mirror::Object>(os, remote_contents, zygote_contents, boot_map, image_header),
    os_(*os) { }

  void CheckEntrySanity(const uint8_t* current) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK_ALIGNED(current, kObjectAlignment);
    mirror::Object* entry = reinterpret_cast<mirror::Object*>(const_cast<uint8_t*>(current));
    // Sanity check that we are reading a real mirror::Object
    CHECK(entry->GetClass() != nullptr) << "Image object at address "
                                        << entry
                                        << " has null class";
    if (kUseBakerReadBarrier) {
      entry->AssertReadBarrierState();
    }
  }

  mirror::Object* GetNextEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    uint8_t* next =
        reinterpret_cast<uint8_t*>(entry) + RoundUp(EntrySize(entry), kObjectAlignment);
    return reinterpret_cast<mirror::Object*>(next);
  }

  void VisitEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Unconditionally store the class descriptor in case we need it later
    mirror::Class* klass = entry->GetClass();
    class_data_[klass].descriptor = GetClassDescriptor(klass);
  }

  void AddCleanEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    class_data_[entry->GetClass()].AddCleanObject();
  }

  void AddFalseDirtyEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    RegionCommon<mirror::Object>::AddFalseDirtyEntry(entry);
    class_data_[entry->GetClass()].AddFalseDirtyObject(entry);
  }

  void AddDirtyEntry(mirror::Object* entry, mirror::Object* entry_remote)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t entry_size = EntrySize(entry);
    ++different_entries_;
    dirty_entry_bytes_ += entry_size;
    // Log dirty count and objects for class objects only.
    mirror::Class* klass = entry->GetClass();
    if (klass->IsClassClass()) {
      // Increment counts for the fields that are dirty
      const uint8_t* current = reinterpret_cast<const uint8_t*>(entry);
      const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(entry_remote);
      for (size_t i = 0; i < entry_size; ++i) {
        if (current[i] != current_remote[i]) {
          field_dirty_count_[i]++;
        }
      }
      dirty_entries_.push_back(entry);
    }
    class_data_[klass].AddDirtyObject(entry, entry_remote);
  }

  void DiffEntryContents(mirror::Object* entry, uint8_t* remote_bytes)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* tabs = " ";
    // Attempt to find fields for all dirty bytes.
    mirror::Class* klass = entry->GetClass();
    if (entry->IsClass()) {
      os_ << tabs
          << "Class " << mirror::Class::PrettyClass(entry->AsClass()) << " " << entry << "\n";
    } else {
      os_ << tabs
          << "Instance of " << mirror::Class::PrettyClass(klass) << " " << entry << "\n";
    }

    std::unordered_set<ArtField*> dirty_instance_fields;
    std::unordered_set<ArtField*> dirty_static_fields;
    // Examine the bytes comprising the Object, computing which fields are dirty
    // and recording them for later display. If the Object is an array object,
    // compute the dirty entries.
    const uint8_t* entry_bytes = reinterpret_cast<const uint8_t*>(entry);
    mirror::Object* remote_entry = reinterpret_cast<mirror::Object*>(remote_bytes);
    for (size_t i = 0, count = entry->SizeOf(); i < count; ++i) {
      if (entry_bytes[i] != remote_bytes[i]) {
        ArtField* field = ArtField::FindInstanceFieldWithOffset</*exact*/false>(klass, i);
        if (field != nullptr) {
          dirty_instance_fields.insert(field);
        } else if (entry->IsClass()) {
          field = ArtField::FindStaticFieldWithOffset</*exact*/false>(entry->AsClass(), i);
          if (field != nullptr) {
            dirty_static_fields.insert(field);
          }
        }
        if (field == nullptr) {
          if (klass->IsArrayClass()) {
            mirror::Class* component_type = klass->GetComponentType();
            Primitive::Type primitive_type = component_type->GetPrimitiveType();
            size_t component_size = Primitive::ComponentSize(primitive_type);
            size_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
            if (i >= data_offset) {
              os_ << tabs << "Dirty array element " << (i - data_offset) / component_size << "\n";
              // Skip to next element to prevent spam.
              i += component_size - 1;
              continue;
            }
          }
          os_ << tabs << "No field for byte offset " << i << "\n";
        }
      }
    }
    // Dump different fields.
    if (!dirty_instance_fields.empty()) {
      os_ << tabs << "Dirty instance fields " << dirty_instance_fields.size() << "\n";
      for (ArtField* field : dirty_instance_fields) {
        os_ << tabs << ArtField::PrettyField(field)
            << " original=" << PrettyFieldValue(field, entry)
            << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
      }
    }
    if (!dirty_static_fields.empty()) {
      os_ << tabs << "Dirty static fields " << dirty_static_fields.size() << "\n";
      for (ArtField* field : dirty_static_fields) {
        os_ << tabs << ArtField::PrettyField(field)
            << " original=" << PrettyFieldValue(field, entry)
            << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
      }
    }
    os_ << "\n";
  }

  void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto dirty_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.dirty_object_count; });
    os_ << "\n" << " Dirty object count by class:\n";
    for (const auto& vk_pair : dirty_object_class_values) {
      size_t dirty_object_count = vk_pair.first;
      mirror::Class* klass = vk_pair.second;
      ClassData& class_data = class_data_[klass];
      size_t object_sizes = class_data.dirty_object_size_in_bytes;
      float avg_dirty_bytes_per_class =
          class_data.dirty_object_byte_count * 1.0f / object_sizes;
      float avg_object_size = object_sizes * 1.0f / dirty_object_count;
      const std::string& descriptor = class_data.descriptor;
      os_ << " " << mirror::Class::PrettyClass(klass) << " ("
          << "objects: " << dirty_object_count << ", "
          << "avg dirty bytes: " << avg_dirty_bytes_per_class << ", "
          << "avg object size: " << avg_object_size << ", "
          << "class descriptor: '" << descriptor << "'"
          << ")\n";
      if (strcmp(descriptor.c_str(), "Ljava/lang/Class;") == 0) {
        DumpSamplesAndOffsetCount();
        os_ << " field contents:\n";
        for (mirror::Object* object : class_data.dirty_objects) {
          // remote class object
          auto remote_klass = reinterpret_cast<mirror::Class*>(object);
          // local class object
          auto local_klass =
              RemoteContentsPointerToLocal(remote_klass,
                                           *RegionCommon<mirror::Object>::remote_contents_,
                                           RegionCommon<mirror::Object>::image_header_);
          os_ << " " << reinterpret_cast<const void*>(object) << " ";
          os_ << " class_status (remote): " << remote_klass->GetStatus() << ", ";
          os_ << " class_status (local): " << local_klass->GetStatus();
          os_ << "\n";
        }
      }
    }
  }

  void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto false_dirty_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.false_dirty_object_count; });
    os_ << "\n" << " False-dirty object count by class:\n";
    for (const auto& vk_pair : false_dirty_object_class_values) {
      size_t object_count = vk_pair.first;
      mirror::Class* klass = vk_pair.second;
      ClassData& class_data = class_data_[klass];
      size_t object_sizes = class_data.false_dirty_byte_count;
      float avg_object_size = object_sizes * 1.0f / object_count;
      const std::string& descriptor = class_data.descriptor;
      os_ << " " << mirror::Class::PrettyClass(klass) << " ("
          << "objects: " << object_count << ", "
          << "avg object size: " << avg_object_size << ", "
          << "total bytes: " << object_sizes << ", "
          << "class descriptor: '" << descriptor << "'"
          << ")\n";
    }
  }

  void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto clean_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.clean_object_count; });
    os_ << "\n" << " Clean object count by class:\n";
    for (const auto& vk_pair : clean_object_class_values) {
      os_ << " " << mirror::Class::PrettyClass(vk_pair.second) << " (" << vk_pair.first << ")\n";
    }
  }

 private:
  // Aggregate and detail class data from an image diff.
  struct ClassData {
    size_t dirty_object_count = 0;
    // Track only the byte-per-byte dirtiness (in bytes)
    size_t dirty_object_byte_count = 0;
    // Track the object-by-object dirtiness (in bytes)
    size_t dirty_object_size_in_bytes = 0;
    size_t clean_object_count = 0;
    std::string descriptor;
    size_t false_dirty_byte_count = 0;
    size_t false_dirty_object_count = 0;
    std::vector<mirror::Object*> false_dirty_objects;
    // Remote pointers to dirty objects
    std::vector<mirror::Object*> dirty_objects;

    void AddCleanObject() REQUIRES_SHARED(Locks::mutator_lock_) {
      ++clean_object_count;
    }

    void AddDirtyObject(mirror::Object* object, mirror::Object* object_remote)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      ++dirty_object_count;
      dirty_object_byte_count += CountDirtyBytes(object, object_remote);
      dirty_object_size_in_bytes += EntrySize(object);
      dirty_objects.push_back(object_remote);
    }

    void AddFalseDirtyObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
      ++false_dirty_object_count;
      false_dirty_objects.push_back(object);
      false_dirty_byte_count += EntrySize(object);
    }

   private:
    // Go byte-by-byte and figure out what exactly got dirtied
    static size_t CountDirtyBytes(mirror::Object* object1, mirror::Object* object2)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      const uint8_t* cur1 = reinterpret_cast<const uint8_t*>(object1);
      const uint8_t* cur2 = reinterpret_cast<const uint8_t*>(object2);
      size_t dirty_bytes = 0;
      size_t object_size = EntrySize(object1);
      for (size_t i = 0; i < object_size; ++i) {
        if (cur1[i] != cur2[i]) {
          dirty_bytes++;
        }
      }
      return dirty_bytes;
    }
  };

  std::ostream& os_;
  std::map<mirror::Class*, ClassData> class_data_;

  DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
};

// Region analysis for ArtMethods.
// TODO: most of these need work.
template<>
class RegionSpecializedBase<ArtMethod> : RegionCommon<ArtMethod> {
 public:
  RegionSpecializedBase(std::ostream* os,
                        std::vector<uint8_t>* remote_contents,
                        std::vector<uint8_t>* zygote_contents,
                        const backtrace_map_t& boot_map,
                        const ImageHeader& image_header) :
    RegionCommon<ArtMethod>(os, remote_contents, zygote_contents, boot_map, image_header),
    os_(*os) { }

  void CheckEntrySanity(const uint8_t* current ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
  }

  ArtMethod* GetNextEntry(ArtMethod* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    uint8_t* next = reinterpret_cast<uint8_t*>(entry) + RoundUp(EntrySize(entry), kObjectAlignment);
    return reinterpret_cast<ArtMethod*>(next);
  }

  void VisitEntry(ArtMethod* method ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
  }

  void AddFalseDirtyEntry(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    RegionCommon<ArtMethod>::AddFalseDirtyEntry(method);
  }

  void AddCleanEntry(ArtMethod* method ATTRIBUTE_UNUSED) {
  }

  void AddDirtyEntry(ArtMethod* method, ArtMethod* method_remote)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t entry_size = EntrySize(method);
    ++different_entries_;
    dirty_entry_bytes_ += entry_size;
    // Increment counts for the fields that are dirty
    const uint8_t* current = reinterpret_cast<const uint8_t*>(method);
    const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(method_remote);
    // ArtMethods always log their dirty count and entries.
    for (size_t i = 0; i < entry_size; ++i) {
      if (current[i] != current_remote[i]) {
        field_dirty_count_[i]++;
      }
    }
    dirty_entries_.push_back(method);
  }

  void DiffEntryContents(ArtMethod* method ATTRIBUTE_UNUSED,
                         uint8_t* remote_bytes ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
  }

  void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    DumpSamplesAndOffsetCount();
    os_ << " field contents:\n";
    for (ArtMethod* method : dirty_entries_) {
      // remote method
      auto art_method = reinterpret_cast<ArtMethod*>(method);
      // remote class
      mirror::Class* remote_declaring_class =
          FixUpRemotePointer(art_method->GetDeclaringClass(),
                             *RegionCommon<ArtMethod>::remote_contents_,
                             RegionCommon<ArtMethod>::boot_map_);
      // local class
      mirror::Class* declaring_class =
          RemoteContentsPointerToLocal(remote_declaring_class,
                                       *RegionCommon<ArtMethod>::remote_contents_,
                                       RegionCommon<ArtMethod>::image_header_);
      DumpOneArtMethod(art_method, declaring_class, remote_declaring_class);
    }
  }

  void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    os_ << " field contents:\n";
    for (ArtMethod* method : false_dirty_entries_) {
      // local class
      mirror::Class* declaring_class = method->GetDeclaringClass();
      DumpOneArtMethod(method, declaring_class, nullptr);
    }
  }

  void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
  }

 private:
  std::ostream& os_;

  void DumpOneArtMethod(ArtMethod* art_method,
                        mirror::Class* declaring_class,
                        mirror::Class* remote_declaring_class)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
    os_ << " " << reinterpret_cast<const void*>(art_method) << " ";
    os_ << " entryPointFromJni: "
        << reinterpret_cast<const void*>(art_method->GetDataPtrSize(pointer_size)) << ", ";
    os_ << " entryPointFromQuickCompiledCode: "
        << reinterpret_cast<const void*>(
               art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size))
        << ", ";
    os_ << " isNative? " << (art_method->IsNative() ? "yes" : "no") << ", ";
    os_ << " class_status (local): " << declaring_class->GetStatus();
    if (remote_declaring_class != nullptr) {
      os_ << ", class_status (remote): " << remote_declaring_class->GetStatus();
    }
    os_ << "\n";
  }

  DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
};

template <typename T>
class RegionData : public RegionSpecializedBase<T> {
 public:
  RegionData(std::ostream* os,
             std::vector<uint8_t>* remote_contents,
             std::vector<uint8_t>* zygote_contents,
             const backtrace_map_t& boot_map,
             const ImageHeader& image_header) :
    RegionSpecializedBase<T>(os, remote_contents, zygote_contents, boot_map, image_header),
    os_(*os) {
    CHECK(remote_contents != nullptr);
    CHECK(zygote_contents != nullptr);
  }

  // Walk over the type T entries in the region between begin_image_ptr and end_image_ptr,
  // collecting and reporting data regarding dirty, difference, etc.
  void ProcessRegion(const MappingData& mapping_data,
                     RemoteProcesses remotes,
                     const uint8_t* begin_image_ptr,
                     const uint8_t* end_image_ptr)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    const uint8_t* current = begin_image_ptr + RoundUp(sizeof(ImageHeader), kObjectAlignment);
    T* entry = reinterpret_cast<T*>(const_cast<uint8_t*>(current));
    while (reinterpret_cast<uintptr_t>(entry) < reinterpret_cast<uintptr_t>(end_image_ptr)) {
      ComputeEntryDirty(entry, begin_image_ptr, mapping_data.dirty_page_set);

      entry = RegionSpecializedBase<T>::GetNextEntry(entry);
    }

    // Looking at only dirty pages, figure out how many of those bytes belong to dirty entries.
    // TODO: fix this now that there are multiple regions in a mapping.
    float true_dirtied_percent =
        RegionCommon<T>::GetDirtyEntryBytes() * 1.0f / (mapping_data.dirty_pages * kPageSize);

    // Entry specific statistics.
    os_ << RegionCommon<T>::GetDifferentEntryCount() << " different entries, \n "
        << RegionCommon<T>::GetDirtyEntryBytes() << " different entry [bytes], \n "
        << RegionCommon<T>::GetFalseDirtyEntryCount() << " false dirty entries,\n "
        << RegionCommon<T>::GetFalseDirtyEntryBytes() << " false dirty entry [bytes], \n "
        << true_dirtied_percent << " different entries-vs-total in a dirty page;\n "
        << "";

    if (RegionCommon<T>::GetZygoteDirtyEntryCount() != 0) {
      // We only reach this point if both pids were specified. Furthermore,
      // entries are only displayed here if they differed in both the image
      // and the zygote, so they are probably private dirty.
      CHECK(remotes == RemoteProcesses::kImageAndZygote);
      os_ << "\n" << " Zygote dirty entries (probably shared dirty): ";
      DiffDirtyEntries(ProcessType::kZygote, begin_image_ptr, RegionCommon<T>::zygote_contents_);
    }
    os_ << "\n";
    switch (remotes) {
      case RemoteProcesses::kZygoteOnly:
        os_ << " Zygote shared dirty entries: ";
        break;
      case RemoteProcesses::kImageAndZygote:
        os_ << " Application dirty entries (private dirty): ";
        break;
      case RemoteProcesses::kImageOnly:
        os_ << " Application dirty entries (unknown whether private or shared dirty): ";
        break;
    }
    DiffDirtyEntries(ProcessType::kRemote, begin_image_ptr, RegionCommon<T>::remote_contents_);
    RegionSpecializedBase<T>::DumpDirtyEntries();
    RegionSpecializedBase<T>::DumpFalseDirtyEntries();
    RegionSpecializedBase<T>::DumpCleanEntries();
  }

 private:
  std::ostream& os_;

  void DiffDirtyEntries(ProcessType process_type,
                        const uint8_t* begin_image_ptr,
                        std::vector<uint8_t>* contents)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    os_ << RegionCommon<T>::dirty_entries_.size() << "\n";
    const std::set<T*>& entries =
        (process_type == ProcessType::kZygote) ?
            RegionCommon<T>::zygote_dirty_entries_ :
            RegionCommon<T>::image_dirty_entries_;
    for (T* entry : entries) {
      uint8_t* entry_bytes = reinterpret_cast<uint8_t*>(entry);
      ptrdiff_t offset = entry_bytes - begin_image_ptr;
      uint8_t* remote_bytes = &(*contents)[offset];
      RegionSpecializedBase<T>::DiffEntryContents(entry, remote_bytes);
    }
  }

  void ComputeEntryDirty(T* entry,
                         const uint8_t* begin_image_ptr,
                         const std::set<size_t>& dirty_pages)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Set up pointers in the remote and the zygote for comparison.
    uint8_t* current = reinterpret_cast<uint8_t*>(entry);
    ptrdiff_t offset = current - begin_image_ptr;
    T* entry_remote =
        reinterpret_cast<T*>(const_cast<uint8_t*>(&(*RegionCommon<T>::remote_contents_)[offset]));
    const uint8_t* current_zygote =
        RegionCommon<T>::zygote_contents_->empty() ? nullptr :
            &(*RegionCommon<T>::zygote_contents_)[offset];
    T* entry_zygote = reinterpret_cast<T*>(const_cast<uint8_t*>(current_zygote));
    // Visit and classify entries at the current location.
    RegionSpecializedBase<T>::VisitEntry(entry);
    bool different_image_entry = EntriesDiffer(entry, entry_remote);
    if (different_image_entry) {
      bool different_zygote_entry = false;
      if (entry_zygote != nullptr) {
        different_zygote_entry = EntriesDiffer(entry, entry_zygote);
      }
      if (different_zygote_entry) {
        // Different from zygote.
        RegionCommon<T>::AddZygoteDirtyEntry(entry);
        RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
      } else {
        // Just different from image.
        RegionCommon<T>::AddImageDirtyEntry(entry);
        RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
      }
    } else {
      RegionSpecializedBase<T>::AddCleanEntry(entry);
    }
    if (!different_image_entry && RegionCommon<T>::IsEntryOnDirtyPage(entry, dirty_pages)) {
      // This entry was either never mutated or got mutated back to the same value.
      // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
      RegionSpecializedBase<T>::AddFalseDirtyEntry(entry);
    }
  }

  DISALLOW_COPY_AND_ASSIGN(RegionData);
};

}  // namespace


class ImgDiagDumper {
 public:
  explicit ImgDiagDumper(std::ostream* os,
                         const ImageHeader& image_header,
                         const std::string& image_location,
                         pid_t image_diff_pid,
                         pid_t zygote_diff_pid)
      : os_(os),
        image_header_(image_header),
        image_location_(image_location),
        image_diff_pid_(image_diff_pid),
        zygote_diff_pid_(zygote_diff_pid),
        zygote_pid_only_(false) {}

  bool Init() {
    std::ostream& os = *os_;

    if (image_diff_pid_ < 0 && zygote_diff_pid_ < 0) {
      os << "Either --image-diff-pid or --zygote-diff-pid (or both) must be specified.\n";
      return false;
    }

    // To keep the rest of the code from having to handle every combination of command-line
    // arguments: if the user invoked us with only --zygote-diff-pid, shuffle that pid into
    // image_diff_pid_, invalidate zygote_diff_pid_, and remember that image_diff_pid_ is
    // now special.
    if (image_diff_pid_ < 0) {
      image_diff_pid_ = zygote_diff_pid_;
      zygote_diff_pid_ = -1;
      zygote_pid_only_ = true;
    }

    {
      struct stat sts;
      std::string proc_pid_str =
          StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
      if (stat(proc_pid_str.c_str(), &sts) == -1) {
        os << "Process does not exist";
        return false;
      }
    }

    // Open /proc/$pid/maps to view memory maps
    auto tmp_proc_maps = std::unique_ptr<BacktraceMap>(BacktraceMap::Create(image_diff_pid_));
    if (tmp_proc_maps == nullptr) {
      os << "Could not read backtrace maps";
      return false;
    }

    bool found_boot_map = false;
    // Find the memory map only for boot.art
    for (const backtrace_map_t& map : *tmp_proc_maps) {
      if (EndsWith(map.name, GetImageLocationBaseName())) {
        if ((map.flags & PROT_WRITE) != 0) {
          boot_map_ = map;
          found_boot_map = true;
          break;
        }
        // In actuality there's more than 1 map, but the second one is read-only.
        // The one we care about is the write-able map.
        // The read-only maps are guaranteed to be identical, so it's not interesting to compare
        // them.
      }
    }

    if (!found_boot_map) {
      os << "Could not find map for " << GetImageLocationBaseName();
      return false;
    }
    // Sanity check boot_map_.
    CHECK(boot_map_.end >= boot_map_.start);
    boot_map_size_ = boot_map_.end - boot_map_.start;

    // Open /proc/<image_diff_pid_>/mem and read as remote_contents_.
    std::string image_file_name =
        StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
    auto image_map_file = std::unique_ptr<File>(OS::OpenFileForReading(image_file_name.c_str()));
    if (image_map_file == nullptr) {
      os << "Failed to open " << image_file_name << " for reading";
      return false;
    }
    std::vector<uint8_t> tmp_remote_contents(boot_map_size_);
    if (!image_map_file->PreadFully(&tmp_remote_contents[0], boot_map_size_, boot_map_.start)) {
      os << "Could not fully read file " << image_file_name;
      return false;
    }

    // If zygote_diff_pid_ != -1, open /proc/<zygote_diff_pid_>/mem and read as zygote_contents_.
    std::vector<uint8_t> tmp_zygote_contents;
    if (zygote_diff_pid_ != -1) {
      std::string zygote_file_name =
          StringPrintf("/proc/%ld/mem", static_cast<long>(zygote_diff_pid_));  // NOLINT [runtime/int]
      std::unique_ptr<File> zygote_map_file(OS::OpenFileForReading(zygote_file_name.c_str()));
      if (zygote_map_file == nullptr) {
        os << "Failed to open " << zygote_file_name << " for reading";
        return false;
      }
      // The boot map should be at the same address.
      tmp_zygote_contents.reserve(boot_map_size_);
      if (!zygote_map_file->PreadFully(&tmp_zygote_contents[0], boot_map_size_, boot_map_.start)) {
        LOG(WARNING) << "Could not fully read zygote file " << zygote_file_name;
        return false;
      }
    }

    // Open /proc/<image_diff_pid_>/pagemap.
    std::string pagemap_file_name = StringPrintf(
        "/proc/%ld/pagemap", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
    auto tmp_pagemap_file =
        std::unique_ptr<File>(OS::OpenFileForReading(pagemap_file_name.c_str()));
    if (tmp_pagemap_file == nullptr) {
      os << "Failed to open " << pagemap_file_name << " for reading: " << strerror(errno);
      return false;
    }

    // Not truly clean, mmap-ing boot.art again would be more pristine, but close enough
    const char* clean_pagemap_file_name = "/proc/self/pagemap";
    auto tmp_clean_pagemap_file = std::unique_ptr<File>(
        OS::OpenFileForReading(clean_pagemap_file_name));
    if (tmp_clean_pagemap_file == nullptr) {
      os << "Failed to open " << clean_pagemap_file_name << " for reading: " << strerror(errno);
      return false;
    }

    auto tmp_kpageflags_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpageflags"));
    if (tmp_kpageflags_file == nullptr) {
      os << "Failed to open /proc/kpageflags for reading: " << strerror(errno);
      return false;
    }

    auto tmp_kpagecount_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpagecount"));
    if (tmp_kpagecount_file == nullptr) {
982 os << "Failed to open /proc/kpagecount for reading:" << strerror(errno);
      return false;
    }

    // Commit the mappings, etc.
    proc_maps_ = std::move(tmp_proc_maps);
    remote_contents_ = std::move(tmp_remote_contents);
    zygote_contents_ = std::move(tmp_zygote_contents);
    pagemap_file_ = std::move(*tmp_pagemap_file.release());
    clean_pagemap_file_ = std::move(*tmp_clean_pagemap_file.release());
    kpageflags_file_ = std::move(*tmp_kpageflags_file.release());
    kpagecount_file_ = std::move(*tmp_kpagecount_file.release());

    return true;
  }

  bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
    std::ostream& os = *os_;
    os << "IMAGE LOCATION: " << image_location_ << "\n\n";

    os << "MAGIC: " << image_header_.GetMagic() << "\n\n";

    os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n";

    PrintPidLine("IMAGE", image_diff_pid_);
    os << "\n\n";
    PrintPidLine("ZYGOTE", zygote_diff_pid_);
    bool ret = true;
    if (image_diff_pid_ >= 0 || zygote_diff_pid_ >= 0) {
      ret = DumpImageDiff();
      os << "\n\n";
    }

    os << std::flush;

    return ret;
  }

 private:
  bool DumpImageDiff()
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return DumpImageDiffMap();
  }

  bool ComputeDirtyBytes(const uint8_t* image_begin, MappingData* mapping_data /*out*/) {
    std::ostream& os = *os_;

    size_t virtual_page_idx = 0;   // Virtual page number (for an absolute memory address)
    size_t page_idx = 0;           // Page index relative to 0
    size_t previous_page_idx = 0;  // Previous page index relative to 0


    // Iterate through one page at a time. Boot map begin/end already implicitly aligned.
    for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; begin += kPageSize) {
      ptrdiff_t offset = begin - boot_map_.start;

      // We treat the image header as part of the memory map for now
      // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
      // But it might still be interesting to see if any of the ImageHeader data mutated
      const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
      uint8_t* remote_ptr = &remote_contents_[offset];

      if (memcmp(local_ptr, remote_ptr, kPageSize) != 0) {
        mapping_data->different_pages++;

        // Count the number of 32-bit integers that are different.
        for (size_t i = 0; i < kPageSize / sizeof(uint32_t); ++i) {
          uint32_t* remote_ptr_int32 = reinterpret_cast<uint32_t*>(remote_ptr);
          const uint32_t* local_ptr_int32 = reinterpret_cast<const uint32_t*>(local_ptr);

          if (remote_ptr_int32[i] != local_ptr_int32[i]) {
            mapping_data->different_int32s++;
          }
        }
      }
    }

    // Iterate through one byte at a time.
    ptrdiff_t page_off_begin = image_header_.GetImageBegin() - image_begin;
    for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; ++begin) {
      previous_page_idx = page_idx;
      ptrdiff_t offset = begin - boot_map_.start;

      // We treat the image header as part of the memory map for now
      // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
      // But it might still be interesting to see if any of the ImageHeader data mutated
      const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
      uint8_t* remote_ptr = &remote_contents_[offset];

      virtual_page_idx = reinterpret_cast<uintptr_t>(local_ptr) / kPageSize;

      // Calculate the page index, relative to the 0th page where the image begins
      page_idx = (offset + page_off_begin) / kPageSize;
      if (*local_ptr != *remote_ptr) {
        // Track number of bytes that are different
        mapping_data->different_bytes++;
      }

      // Independently count the # of dirty pages on the remote side
      size_t remote_virtual_page_idx = begin / kPageSize;
      if (previous_page_idx != page_idx) {
        uint64_t page_count = 0xC0FFEE;
        // TODO: virtual_page_idx needs to be from the same process
        std::string error_msg;
        int dirtiness = (IsPageDirty(&pagemap_file_,        // Image-diff-pid procmap
                                     &clean_pagemap_file_,  // Self procmap
                                     &kpageflags_file_,
                                     &kpagecount_file_,
                                     remote_virtual_page_idx,  // potentially "dirty" page
                                     virtual_page_idx,         // true "clean" page
                                     &page_count,
                                     &error_msg));
        if (dirtiness < 0) {
          os << error_msg;
          return false;
        } else if (dirtiness > 0) {
          mapping_data->dirty_pages++;
          mapping_data->dirty_page_set.insert(mapping_data->dirty_page_set.end(), virtual_page_idx);
        }

        bool is_dirty = dirtiness > 0;
        bool is_private = page_count == 1;

        if (page_count == 1) {
          mapping_data->private_pages++;
        }

        if (is_dirty && is_private) {
          mapping_data->private_dirty_pages++;
        }
      }
    }
    mapping_data->false_dirty_pages = mapping_data->dirty_pages - mapping_data->different_pages;
    // Print low-level (bytes, int32s, pages) statistics.
    os << mapping_data->different_bytes << " differing bytes,\n "
       << mapping_data->different_int32s << " differing int32s,\n "
       << mapping_data->different_pages << " differing pages,\n "
       << mapping_data->dirty_pages << " pages are dirty;\n "
       << mapping_data->false_dirty_pages << " pages are false dirty;\n "
       << mapping_data->private_pages << " pages are private;\n "
       << mapping_data->private_dirty_pages << " pages are Private_Dirty\n ";

    return true;
  }

  // Look at /proc/$pid/mem and only diff the things from there
  bool DumpImageDiffMap()
      REQUIRES_SHARED(Locks::mutator_lock_) {
    std::ostream& os = *os_;
    std::string error_msg;

    // Walk the bytes and diff against our boot image
    os << "\nObserving boot image header at address "
       << reinterpret_cast<const void*>(&image_header_)
       << "\n\n";

    const uint8_t* image_begin_unaligned = image_header_.GetImageBegin();
    const uint8_t* image_mirror_end_unaligned = image_begin_unaligned +
        image_header_.GetImageSection(ImageHeader::kSectionObjects).Size();
    const uint8_t* image_end_unaligned = image_begin_unaligned + image_header_.GetImageSize();

    // Adjust range to nearest page
    const uint8_t* image_begin = AlignDown(image_begin_unaligned, kPageSize);
    const uint8_t* image_end = AlignUp(image_end_unaligned, kPageSize);

    if (reinterpret_cast<uintptr_t>(image_begin) > boot_map_.start ||
        reinterpret_cast<uintptr_t>(image_end) < boot_map_.end) {
      // Sanity check that we aren't trying to read a completely different boot image
      os << "Remote boot map is out of range of local boot map: " <<
         "local begin " << reinterpret_cast<const void*>(image_begin) <<
         ", local end " << reinterpret_cast<const void*>(image_end) <<
         ", remote begin " << reinterpret_cast<const void*>(boot_map_.start) <<
         ", remote end " << reinterpret_cast<const void*>(boot_map_.end);
      return false;
      // If we wanted even more validation we could map the ImageHeader from the file
    }

    MappingData mapping_data;

    os << "Mapping at [" << reinterpret_cast<void*>(boot_map_.start) << ", "
       << reinterpret_cast<void*>(boot_map_.end) << ") had:\n ";
    if (!ComputeDirtyBytes(image_begin, &mapping_data)) {
      return false;
    }

    RegionData<mirror::Object> object_region_data(os_,
                                                  &remote_contents_,
                                                  &zygote_contents_,
                                                  boot_map_,
                                                  image_header_);

    RemoteProcesses remotes;
    if (zygote_pid_only_) {
      remotes = RemoteProcesses::kZygoteOnly;
    } else if (zygote_diff_pid_ > 0) {
      remotes = RemoteProcesses::kImageAndZygote;
    } else {
      remotes = RemoteProcesses::kImageOnly;
    }

    object_region_data.ProcessRegion(mapping_data,
                                     remotes,
                                     image_begin_unaligned,
                                     image_mirror_end_unaligned);

    return true;
  }

  static bool GetPageFrameNumber(File* page_map_file,
                                 size_t virtual_page_index,
                                 uint64_t* page_frame_number,
                                 std::string* error_msg) {
    CHECK(page_map_file != nullptr);
    CHECK(page_frame_number != nullptr);
    CHECK(error_msg != nullptr);

    constexpr size_t kPageMapEntrySize = sizeof(uint64_t);
    constexpr uint64_t kPageFrameNumberMask = (1ULL << 55) - 1;  // bits 0-54 [in /proc/$pid/pagemap]
    constexpr uint64_t kPageSoftDirtyMask = (1ULL << 55);  // bit 55 [in /proc/$pid/pagemap]

    uint64_t page_map_entry = 0;

    // Read 64-bit entry from /proc/$pid/pagemap to get the physical page frame number
    if (!page_map_file->PreadFully(&page_map_entry, kPageMapEntrySize,
                                   virtual_page_index * kPageMapEntrySize)) {
      *error_msg = StringPrintf("Failed to read the virtual page index entry from %s",
                                page_map_file->GetPath().c_str());
      return false;
    }

    // TODO: seems useless, remove this.
    bool soft_dirty = (page_map_entry & kPageSoftDirtyMask) != 0;
    if ((false)) {
      LOG(VERBOSE) << soft_dirty;  // Suppress unused warning
      UNREACHABLE();
    }

    *page_frame_number = page_map_entry & kPageFrameNumberMask;

    return true;
  }

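  // How page dirtiness is decided below: the page frame number (PFN, bits 0-54 of the
  // /proc/<pid>/pagemap entry) of the remote page is compared against the PFN of the
  // corresponding page in our own, untouched mapping of the image. A clean shared page is
  // backed by the same physical frame in both processes; once a process writes to it,
  // copy-on-write gives it a private frame, so differing PFNs mean the page was dirtied.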
  static int IsPageDirty(File* page_map_file,
                         File* clean_pagemap_file,
                         File* kpageflags_file,
                         File* kpagecount_file,
                         size_t virtual_page_idx,
                         size_t clean_virtual_page_idx,
                         // Out parameters:
                         uint64_t* page_count, std::string* error_msg) {
    CHECK(page_map_file != nullptr);
    CHECK(clean_pagemap_file != nullptr);
    CHECK_NE(page_map_file, clean_pagemap_file);
    CHECK(kpageflags_file != nullptr);
    CHECK(kpagecount_file != nullptr);
    CHECK(page_count != nullptr);
    CHECK(error_msg != nullptr);

    // Constants are from https://www.kernel.org/doc/Documentation/vm/pagemap.txt

    constexpr size_t kPageFlagsEntrySize = sizeof(uint64_t);
    constexpr size_t kPageCountEntrySize = sizeof(uint64_t);
    constexpr uint64_t kPageFlagsDirtyMask = (1ULL << 4);  // in /proc/kpageflags
    constexpr uint64_t kPageFlagsNoPageMask = (1ULL << 20);  // in /proc/kpageflags
    constexpr uint64_t kPageFlagsMmapMask = (1ULL << 11);  // in /proc/kpageflags

    uint64_t page_frame_number = 0;
    if (!GetPageFrameNumber(page_map_file, virtual_page_idx, &page_frame_number, error_msg)) {
      return -1;
    }

    uint64_t page_frame_number_clean = 0;
    if (!GetPageFrameNumber(clean_pagemap_file, clean_virtual_page_idx, &page_frame_number_clean,
                            error_msg)) {
      return -1;
    }

    // Read 64-bit entry from /proc/kpageflags to get the dirty bit for a page
    uint64_t kpage_flags_entry = 0;
    if (!kpageflags_file->PreadFully(&kpage_flags_entry,
                                     kPageFlagsEntrySize,
                                     page_frame_number * kPageFlagsEntrySize)) {
      *error_msg = StringPrintf("Failed to read the page flags from %s",
                                kpageflags_file->GetPath().c_str());
      return -1;
    }

    // Read 64-bit entry from /proc/kpagecount to get mapping counts for a page
    if (!kpagecount_file->PreadFully(page_count /*out*/,
                                     kPageCountEntrySize,
                                     page_frame_number * kPageCountEntrySize)) {
      *error_msg = StringPrintf("Failed to read the page count from %s",
                                kpagecount_file->GetPath().c_str());
      return -1;
    }

    // There must be a page frame at the requested address.
    CHECK_EQ(kpage_flags_entry & kPageFlagsNoPageMask, 0u);
    // The page frame must be memory mapped
    CHECK_NE(kpage_flags_entry & kPageFlagsMmapMask, 0u);

    // Page is dirty, i.e. has diverged from file, if the 4th bit is set to 1
    bool flags_dirty = (kpage_flags_entry & kPageFlagsDirtyMask) != 0;

    // page_frame_number_clean must come from the *same* process
    // but a *different* mmap than page_frame_number
    if (flags_dirty) {
      CHECK_NE(page_frame_number, page_frame_number_clean);
    }

    return page_frame_number != page_frame_number_clean;
  }

  void PrintPidLine(const std::string& kind, pid_t pid) {
    if (pid < 0) {
      *os_ << kind << " DIFF PID: disabled\n\n";
    } else {
      *os_ << kind << " DIFF PID (" << pid << "): ";
    }
  }

  static bool EndsWith(const std::string& str, const std::string& suffix) {
    return str.size() >= suffix.size() &&
           str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
  }

  // Return suffix of the file path after the last /. (e.g. /foo/bar -> bar, bar -> bar)
  static std::string BaseName(const std::string& str) {
    size_t idx = str.rfind('/');
    if (idx == std::string::npos) {
      return str;
    }

    return str.substr(idx + 1);
  }

  // Return the image location, stripped of any directories, e.g. "boot.art" or "core.art"
  std::string GetImageLocationBaseName() const {
    return BaseName(std::string(image_location_));
  }

  std::ostream* os_;
  const ImageHeader& image_header_;
  const std::string image_location_;
  pid_t image_diff_pid_;  // Dump image diff against boot.art if pid is non-negative
  pid_t zygote_diff_pid_;  // Dump image diff against zygote boot.art if pid is non-negative
  bool zygote_pid_only_;  // The user only specified a pid for the zygote.

  // BacktraceMap used for finding the memory mapping of the image file.
  std::unique_ptr<BacktraceMap> proc_maps_;
  // Boot image mapping.
  backtrace_map_t boot_map_{};  // NOLINT
  // The size of the boot image mapping.
  size_t boot_map_size_;
  // The contents of the boot image mapping, read from /proc/<image_diff_pid_>/mem.
  std::vector<uint8_t> remote_contents_;
  // The contents of the boot image mapping, read from /proc/<zygote_diff_pid_>/mem.
  std::vector<uint8_t> zygote_contents_;
  // A File for reading /proc/<image_diff_pid_>/pagemap.
  File pagemap_file_;
  // A File for reading /proc/self/pagemap.
  File clean_pagemap_file_;
  // A File for reading /proc/kpageflags.
  File kpageflags_file_;
  // A File for reading /proc/kpagecount.
  File kpagecount_file_;

  DISALLOW_COPY_AND_ASSIGN(ImgDiagDumper);
};

static int DumpImage(Runtime* runtime,
                     std::ostream* os,
                     pid_t image_diff_pid,
                     pid_t zygote_diff_pid) {
  ScopedObjectAccess soa(Thread::Current());
  gc::Heap* heap = runtime->GetHeap();
  std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
  CHECK(!image_spaces.empty());
  for (gc::space::ImageSpace* image_space : image_spaces) {
    const ImageHeader& image_header = image_space->GetImageHeader();
    if (!image_header.IsValid()) {
      fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
      return EXIT_FAILURE;
    }

    ImgDiagDumper img_diag_dumper(os,
                                  image_header,
                                  image_space->GetImageLocation(),
                                  image_diff_pid,
                                  zygote_diff_pid);
    if (!img_diag_dumper.Init()) {
      return EXIT_FAILURE;
    }
    if (!img_diag_dumper.Dump()) {
      return EXIT_FAILURE;
    }
  }
  return EXIT_SUCCESS;
}

struct ImgDiagArgs : public CmdlineArgs {
 protected:
  using Base = CmdlineArgs;

  virtual ParseStatus ParseCustom(const StringPiece& option,
                                  std::string* error_msg) OVERRIDE {
    {
      ParseStatus base_parse = Base::ParseCustom(option, error_msg);
      if (base_parse != kParseUnknownArgument) {
        return base_parse;
      }
    }

    if (option.starts_with("--image-diff-pid=")) {
      const char* image_diff_pid = option.substr(strlen("--image-diff-pid=")).data();

      if (!ParseInt(image_diff_pid, &image_diff_pid_)) {
        *error_msg = "Image diff pid out of range";
        return kParseError;
      }
    } else if (option.starts_with("--zygote-diff-pid=")) {
      const char* zygote_diff_pid = option.substr(strlen("--zygote-diff-pid=")).data();

      if (!ParseInt(zygote_diff_pid, &zygote_diff_pid_)) {
        *error_msg = "Zygote diff pid out of range";
        return kParseError;
      }
    } else {
      return kParseUnknownArgument;
    }

    return kParseOk;
  }

  virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
    // Perform the parent checks.
    ParseStatus parent_checks = Base::ParseChecks(error_msg);
    if (parent_checks != kParseOk) {
      return parent_checks;
    }

    // Perform our own checks.

    if (kill(image_diff_pid_,
             /*sig*/0) != 0) {  // No signal is sent, perform error-checking only.
      // Check if the pid exists before proceeding.
      if (errno == ESRCH) {
        *error_msg = "Process specified does not exist";
      } else {
        *error_msg = StringPrintf("Failed to check process status: %s", strerror(errno));
      }
      return kParseError;
    } else if (instruction_set_ != kRuntimeISA) {
      // Don't allow different ISAs since the images are ISA-specific.
      // Right now the code assumes both the runtime ISA and the remote ISA are identical.
      *error_msg = "Must use the default runtime ISA; changing ISA is not supported.";
      return kParseError;
    }

    return kParseOk;
  }

  virtual std::string GetUsage() const {
    std::string usage;

    usage +=
        "Usage: imgdiag [options] ...\n"
        " Example: imgdiag --image-diff-pid=$(pidof dex2oat)\n"
        " Example: adb shell imgdiag --image-diff-pid=$(pid zygote)\n"
        "\n";

    usage += Base::GetUsage();

    usage +=  // Optional.
        " --image-diff-pid=<pid>: provide the PID of a process whose boot.art you want to diff.\n"
        " Example: --image-diff-pid=$(pid zygote)\n"
        " --zygote-diff-pid=<pid>: provide the PID of the zygote whose boot.art you want to diff "
        "against.\n"
        " Example: --zygote-diff-pid=$(pid zygote)\n"
        "\n";

    return usage;
  }

 public:
  pid_t image_diff_pid_ = -1;
  pid_t zygote_diff_pid_ = -1;
};

struct ImgDiagMain : public CmdlineMain<ImgDiagArgs> {
  virtual bool ExecuteWithRuntime(Runtime* runtime) {
    CHECK(args_ != nullptr);

    return DumpImage(runtime,
                     args_->os_,
                     args_->image_diff_pid_,
                     args_->zygote_diff_pid_) == EXIT_SUCCESS;
  }
};

}  // namespace art

int main(int argc, char** argv) {
  art::ImgDiagMain main;
  return main.Main(argc, argv);
}