/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>

#include <fstream>
#include <functional>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include <android-base/parseint.h>
#include "android-base/stringprintf.h"

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/os.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "image.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "oat.h"
#include "oat_file.h"
#include "oat_file_manager.h"
#include "scoped_thread_state_change-inl.h"

#include "backtrace/BacktraceMap.h"
#include "cmdline.h"

#include <signal.h>
#include <sys/stat.h>
#include <sys/types.h>

namespace art {

using android::base::StringPrintf;

namespace {

constexpr size_t kMaxAddressPrint = 5;

enum class ProcessType {
  kZygote,
  kRemote
};

enum class RemoteProcesses {
  kImageOnly,
  kZygoteOnly,
  kImageAndZygote
};

struct MappingData {
  // The count of pages that are considered dirty by the OS.
  size_t dirty_pages = 0;
  // The count of pages that differ by at least one byte.
  size_t different_pages = 0;
  // The count of differing bytes.
  size_t different_bytes = 0;
  // The count of differing four-byte units.
  size_t different_int32s = 0;
  // The count of pages that have mapping count == 1.
  size_t private_pages = 0;
  // The count of private pages that are also dirty.
  size_t private_dirty_pages = 0;
  // The count of pages that are marked dirty but do not differ.
  size_t false_dirty_pages = 0;
  // Set of the local virtual page indices that are dirty.
  std::set<size_t> dirty_page_set;
};
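
// Note on the terminology reflected in the fields above (and used throughout this tool):
// "dirty" pages are those the kernel reports as modified (read from /proc pagemap, kpageflags
// and kpagecount in ComputeDirtyBytes below); "different" pages/bytes are those whose contents
// actually differ from the local image; "false dirty" pages are reported dirty by the OS but
// are byte-identical to the image; "private" pages have a mapping count of 1, i.e. they are not
// shared with any other process.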

static std::string GetClassDescriptor(mirror::Class* klass)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  CHECK(klass != nullptr);

  std::string descriptor;
  const char* descriptor_str = klass->GetDescriptor(&descriptor /*out*/);

  return std::string(descriptor_str);
}

static std::string PrettyFieldValue(ArtField* field, mirror::Object* object)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  std::ostringstream oss;
  switch (field->GetTypeAsPrimitiveType()) {
    case Primitive::kPrimNot: {
      oss << object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
          field->GetOffset());
      break;
    }
    case Primitive::kPrimBoolean: {
      oss << static_cast<bool>(object->GetFieldBoolean<kVerifyNone>(field->GetOffset()));
      break;
    }
    case Primitive::kPrimByte: {
      oss << static_cast<int32_t>(object->GetFieldByte<kVerifyNone>(field->GetOffset()));
      break;
    }
    case Primitive::kPrimChar: {
      oss << object->GetFieldChar<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimShort: {
      oss << object->GetFieldShort<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimInt: {
      oss << object->GetField32<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimLong: {
      oss << object->GetField64<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimFloat: {
      oss << object->GetField32<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimDouble: {
      oss << object->GetField64<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimVoid: {
      oss << "void";
      break;
    }
  }
  return oss.str();
}

template <typename K, typename V, typename D>
static std::vector<std::pair<V, K>> SortByValueDesc(
    const std::map<K, D> map,
    std::function<V(const D&)> value_mapper = [](const D& d) { return static_cast<V>(d); }) {
  // Store value->key so that we can use the default sort from pair which
  // sorts by value first and then key
  std::vector<std::pair<V, K>> value_key_vector;

  for (const auto& kv_pair : map) {
    value_key_vector.push_back(std::make_pair(value_mapper(kv_pair.second), kv_pair.first));
  }

  // Sort in reverse (descending order)
  std::sort(value_key_vector.rbegin(), value_key_vector.rend());
  return value_key_vector;
}
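
// Illustration (not from the original source): given field_dirty_count_ =
// {{8, 3}, {16, 5}, {24, 1}}, mapping byte offsets to dirty counts,
// SortByValueDesc<off_t, size_t, size_t>(field_dirty_count_) yields
// {(5, 16), (3, 8), (1, 24)}, i.e. (count, offset) pairs sorted by count descending,
// which is the order DumpSamplesAndOffsetCount() prints its "+offset:count" list in.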

// Fixup a remote pointer that we read from a foreign boot.art to point to our own memory.
// Returned pointer will point to inside of remote_contents.
template <typename T>
static ObjPtr<T> FixUpRemotePointer(ObjPtr<T> remote_ptr,
                                    std::vector<uint8_t>& remote_contents,
                                    const backtrace_map_t& boot_map)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (remote_ptr == nullptr) {
    return nullptr;
  }

  uintptr_t remote = reinterpret_cast<uintptr_t>(remote_ptr.Ptr());

  // In the case the remote pointer is out of range, it probably belongs to another image.
  // Just return null for this case.
  if (remote < boot_map.start || remote >= boot_map.end) {
    return nullptr;
  }

  off_t boot_offset = remote - boot_map.start;

  return reinterpret_cast<T*>(&remote_contents[boot_offset]);
}

template <typename T>
static ObjPtr<T> RemoteContentsPointerToLocal(ObjPtr<T> remote_ptr,
                                              std::vector<uint8_t>& remote_contents,
                                              const ImageHeader& image_header)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (remote_ptr == nullptr) {
    return nullptr;
  }

  uint8_t* remote = reinterpret_cast<uint8_t*>(remote_ptr.Ptr());
  ptrdiff_t boot_offset = remote - &remote_contents[0];

  const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header) + boot_offset;

  return reinterpret_cast<T*>(const_cast<uint8_t*>(local_ptr));
}
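
// Note: the two helpers above translate in opposite directions. FixUpRemotePointer takes an
// address that is only meaningful inside the remote process and rebases it into
// remote_contents, the local byte copy of that process' boot image mapping.
// RemoteContentsPointerToLocal takes a pointer into remote_contents and rebases it onto the
// locally mapped image (whose first byte is the ImageHeader), so the same offset can be
// inspected in both copies.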

template <typename T> size_t EntrySize(T* entry);
template<> size_t EntrySize(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
  return object->SizeOf();
}
template<> size_t EntrySize(ArtMethod* art_method) REQUIRES_SHARED(Locks::mutator_lock_) {
  return sizeof(*art_method);
}

template <typename T>
static bool EntriesDiffer(T* entry1, T* entry2) REQUIRES_SHARED(Locks::mutator_lock_) {
  return memcmp(entry1, entry2, EntrySize(entry1)) != 0;
}
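
// Note: entry comparison is a raw memcmp over EntrySize() bytes, so any byte-level difference
// marks the whole entry as dirty; the per-field/per-member breakdown is recovered later by
// DiffEntryContents().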

template <typename T>
struct RegionCommon {
 public:
  RegionCommon(std::ostream* os,
               std::vector<uint8_t>* remote_contents,
               std::vector<uint8_t>* zygote_contents,
               const backtrace_map_t& boot_map,
               const ImageHeader& image_header) :
    os_(*os),
    remote_contents_(remote_contents),
    zygote_contents_(zygote_contents),
    boot_map_(boot_map),
    image_header_(image_header),
    different_entries_(0),
    dirty_entry_bytes_(0),
    false_dirty_entry_bytes_(0) {
    CHECK(remote_contents != nullptr);
    CHECK(zygote_contents != nullptr);
  }

  void DumpSamplesAndOffsetCount() {
    os_ << "      sample object addresses: ";
    for (size_t i = 0; i < dirty_entries_.size() && i < kMaxAddressPrint; ++i) {
      T* entry = dirty_entries_[i];
      os_ << reinterpret_cast<void*>(entry) << ", ";
    }
    os_ << "\n";
    os_ << "      dirty byte +offset:count list = ";
    std::vector<std::pair<size_t, off_t>> field_dirty_count_sorted =
        SortByValueDesc<off_t, size_t, size_t>(field_dirty_count_);
    for (const std::pair<size_t, off_t>& pair : field_dirty_count_sorted) {
      off_t offset = pair.second;
      size_t count = pair.first;
      os_ << "+" << offset << ":" << count << ", ";
    }
    os_ << "\n";
  }

  size_t GetDifferentEntryCount() const { return different_entries_; }
  size_t GetDirtyEntryBytes() const { return dirty_entry_bytes_; }
  size_t GetFalseDirtyEntryCount() const { return false_dirty_entries_.size(); }
  size_t GetFalseDirtyEntryBytes() const { return false_dirty_entry_bytes_; }
  size_t GetZygoteDirtyEntryCount() const { return zygote_dirty_entries_.size(); }

 protected:
  bool IsEntryOnDirtyPage(T* entry, const std::set<size_t>& dirty_pages) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t size = EntrySize(entry);
    size_t page_off = 0;
    size_t current_page_idx;
    uintptr_t entry_address = reinterpret_cast<uintptr_t>(entry);
    // Iterate every page this entry belongs to
    do {
      current_page_idx = entry_address / kPageSize + page_off;
      if (dirty_pages.find(current_page_idx) != dirty_pages.end()) {
        // This entry is on a dirty page
        return true;
      }
      page_off++;
    } while ((current_page_idx * kPageSize) < RoundUp(entry_address + size, kObjectAlignment));
    return false;
  }
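
  // Note: the loop above walks every page that the entry overlaps, starting at the page
  // containing its first byte and stopping once the page index passes the entry's
  // kObjectAlignment-rounded end, so an entry straddling a page boundary is reported as
  // dirty if any one of its pages is dirty.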

  void AddZygoteDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    zygote_dirty_entries_.insert(entry);
  }

  void AddImageDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    image_dirty_entries_.insert(entry);
  }

  void AddFalseDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    false_dirty_entries_.push_back(entry);
    false_dirty_entry_bytes_ += EntrySize(entry);
  }

  // The output stream to write to.
  std::ostream& os_;
  // The byte contents of the remote (image) process' image.
  std::vector<uint8_t>* remote_contents_;
  // The byte contents of the zygote process' image.
  std::vector<uint8_t>* zygote_contents_;
  const backtrace_map_t& boot_map_;
  const ImageHeader& image_header_;

  // Count of entries that are different.
  size_t different_entries_;

  // Local entries that are dirty (differ in at least one byte).
  size_t dirty_entry_bytes_;
  std::vector<T*> dirty_entries_;

  // Local entries that are clean, but located on dirty pages.
  size_t false_dirty_entry_bytes_;
  std::vector<T*> false_dirty_entries_;

  // Image dirty entries
  // If zygote_pid_only_ == true, these are shared dirty entries in the zygote.
  // If zygote_pid_only_ == false, these are private dirty entries in the application.
  std::set<T*> image_dirty_entries_;

  // Zygote dirty entries (probably private dirty).
  // We only add entries here if they differed in both the image and the zygote, so
  // they are probably private dirty.
  std::set<T*> zygote_dirty_entries_;

  std::map<off_t /* field offset */, size_t /* count */> field_dirty_count_;

 private:
  DISALLOW_COPY_AND_ASSIGN(RegionCommon);
};

template <typename T>
class RegionSpecializedBase : public RegionCommon<T> {
};

// Region analysis for mirror::Objects
class ImgObjectVisitor : public ObjectVisitor {
 public:
  using ComputeDirtyFunc = std::function<void(mirror::Object* object,
                                              const uint8_t* begin_image_ptr,
                                              const std::set<size_t>& dirty_pages)>;
  ImgObjectVisitor(ComputeDirtyFunc dirty_func,
                   const uint8_t* begin_image_ptr,
                   const std::set<size_t>& dirty_pages) :
    dirty_func_(std::move(dirty_func)),
    begin_image_ptr_(begin_image_ptr),
    dirty_pages_(dirty_pages) { }

  ~ImgObjectVisitor() override { }

  void Visit(mirror::Object* object) override REQUIRES_SHARED(Locks::mutator_lock_) {
    // Sanity check that we are reading a real mirror::Object
    CHECK(object->GetClass() != nullptr) << "Image object at address "
                                         << object
                                         << " has null class";
    if (kUseBakerReadBarrier) {
      object->AssertReadBarrierState();
    }
    dirty_func_(object, begin_image_ptr_, dirty_pages_);
  }

 private:
  const ComputeDirtyFunc dirty_func_;
  const uint8_t* begin_image_ptr_;
  const std::set<size_t>& dirty_pages_;
};

template<>
class RegionSpecializedBase<mirror::Object> : public RegionCommon<mirror::Object> {
 public:
  RegionSpecializedBase(std::ostream* os,
                        std::vector<uint8_t>* remote_contents,
                        std::vector<uint8_t>* zygote_contents,
                        const backtrace_map_t& boot_map,
                        const ImageHeader& image_header,
                        bool dump_dirty_objects)
      : RegionCommon<mirror::Object>(os, remote_contents, zygote_contents, boot_map, image_header),
        os_(*os),
        dump_dirty_objects_(dump_dirty_objects) { }

  // Define a common public type name for use by RegionData.
  using VisitorClass = ImgObjectVisitor;

  void VisitEntries(VisitorClass* visitor,
                    uint8_t* base,
                    PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    RegionCommon<mirror::Object>::image_header_.VisitObjects(visitor, base, pointer_size);
  }

  void VisitEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Unconditionally store the class descriptor in case we need it later
    mirror::Class* klass = entry->GetClass();
    class_data_[klass].descriptor = GetClassDescriptor(klass);
  }

  void AddCleanEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    class_data_[entry->GetClass()].AddCleanObject();
  }

  void AddFalseDirtyEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    RegionCommon<mirror::Object>::AddFalseDirtyEntry(entry);
    class_data_[entry->GetClass()].AddFalseDirtyObject(entry);
  }

  void AddDirtyEntry(mirror::Object* entry, mirror::Object* entry_remote)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t entry_size = EntrySize(entry);
    ++different_entries_;
    dirty_entry_bytes_ += entry_size;
    // Log dirty count and objects for class objects only.
    mirror::Class* klass = entry->GetClass();
    if (klass->IsClassClass()) {
      // Increment counts for the fields that are dirty
      const uint8_t* current = reinterpret_cast<const uint8_t*>(entry);
      const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(entry_remote);
      for (size_t i = 0; i < entry_size; ++i) {
        if (current[i] != current_remote[i]) {
          field_dirty_count_[i]++;
        }
      }
      dirty_entries_.push_back(entry);
    }
    class_data_[klass].AddDirtyObject(entry, entry_remote);
  }

  void DiffEntryContents(mirror::Object* entry,
                         uint8_t* remote_bytes,
                         const uint8_t* base_ptr,
                         bool log_dirty_objects)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* tabs = "    ";
    // Attempt to find fields for all dirty bytes.
    mirror::Class* klass = entry->GetClass();
    if (entry->IsClass()) {
      os_ << tabs
          << "Class " << mirror::Class::PrettyClass(entry->AsClass()) << " " << entry << "\n";
    } else {
      os_ << tabs
          << "Instance of " << mirror::Class::PrettyClass(klass) << " " << entry << "\n";
    }

    std::unordered_set<ArtField*> dirty_instance_fields;
    std::unordered_set<ArtField*> dirty_static_fields;
    // Examine the bytes comprising the Object, computing which fields are dirty
    // and recording them for later display. If the Object is an array object,
    // compute the dirty entries.
    mirror::Object* remote_entry = reinterpret_cast<mirror::Object*>(remote_bytes);
    for (size_t i = 0, count = entry->SizeOf(); i < count; ++i) {
      if (base_ptr[i] != remote_bytes[i]) {
        ArtField* field = ArtField::FindInstanceFieldWithOffset</*exact*/false>(klass, i);
        if (field != nullptr) {
          dirty_instance_fields.insert(field);
        } else if (entry->IsClass()) {
          field = ArtField::FindStaticFieldWithOffset</*exact*/false>(entry->AsClass(), i);
          if (field != nullptr) {
            dirty_static_fields.insert(field);
          }
        }
        if (field == nullptr) {
          if (klass->IsArrayClass()) {
            mirror::Class* component_type = klass->GetComponentType();
            Primitive::Type primitive_type = component_type->GetPrimitiveType();
            size_t component_size = Primitive::ComponentSize(primitive_type);
            size_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
            if (i >= data_offset) {
              os_ << tabs << "Dirty array element " << (i - data_offset) / component_size << "\n";
              // Skip to next element to prevent spam.
              i += component_size - 1;
              continue;
            }
          }
          os_ << tabs << "No field for byte offset " << i << "\n";
        }
      }
    }
    // Dump different fields.
    if (!dirty_instance_fields.empty()) {
      os_ << tabs << "Dirty instance fields " << dirty_instance_fields.size() << "\n";
      for (ArtField* field : dirty_instance_fields) {
        os_ << tabs << ArtField::PrettyField(field)
            << " original=" << PrettyFieldValue(field, entry)
            << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
      }
    }
    if (!dirty_static_fields.empty()) {
      if (dump_dirty_objects_ && log_dirty_objects) {
        dirty_objects_.insert(entry);
      }
      os_ << tabs << "Dirty static fields " << dirty_static_fields.size() << "\n";
      for (ArtField* field : dirty_static_fields) {
        os_ << tabs << ArtField::PrettyField(field)
            << " original=" << PrettyFieldValue(field, entry)
            << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
      }
    }
    os_ << "\n";
  }

  void DumpDirtyObjects() REQUIRES_SHARED(Locks::mutator_lock_) {
    for (mirror::Object* obj : dirty_objects_) {
      if (obj->IsClass()) {
        os_ << "Private dirty object: " << obj->AsClass()->PrettyDescriptor() << "\n";
      }
    }
  }

  void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto dirty_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.dirty_object_count; });
    os_ << "\n" << "  Dirty object count by class:\n";
    for (const auto& vk_pair : dirty_object_class_values) {
      size_t dirty_object_count = vk_pair.first;
      mirror::Class* klass = vk_pair.second;
      ClassData& class_data = class_data_[klass];
      size_t object_sizes = class_data.dirty_object_size_in_bytes;
      float avg_dirty_bytes_per_class =
          class_data.dirty_object_byte_count * 1.0f / object_sizes;
      float avg_object_size = object_sizes * 1.0f / dirty_object_count;
      const std::string& descriptor = class_data.descriptor;
      os_ << "    " << mirror::Class::PrettyClass(klass) << " ("
          << "objects: " << dirty_object_count << ", "
          << "avg dirty bytes: " << avg_dirty_bytes_per_class << ", "
          << "avg object size: " << avg_object_size << ", "
          << "class descriptor: '" << descriptor << "'"
          << ")\n";
      if (strcmp(descriptor.c_str(), "Ljava/lang/Class;") == 0) {
        DumpSamplesAndOffsetCount();
        os_ << "      field contents:\n";
        for (mirror::Object* object : class_data.dirty_objects) {
          // remote class object
          ObjPtr<mirror::Class> remote_klass =
              ObjPtr<mirror::Class>::DownCast<mirror::Object>(object);
          // local class object
          ObjPtr<mirror::Class> local_klass =
              RemoteContentsPointerToLocal(remote_klass,
                                           *RegionCommon<mirror::Object>::remote_contents_,
                                           RegionCommon<mirror::Object>::image_header_);
          os_ << "        " << reinterpret_cast<const void*>(object) << " ";
          os_ << "  class_status (remote): " << remote_klass->GetStatus() << ", ";
          os_ << "  class_status (local): " << local_klass->GetStatus();
          os_ << "\n";
        }
      }
    }
  }

  void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto false_dirty_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.false_dirty_object_count; });
    os_ << "\n" << "  False-dirty object count by class:\n";
    for (const auto& vk_pair : false_dirty_object_class_values) {
      size_t object_count = vk_pair.first;
      mirror::Class* klass = vk_pair.second;
      ClassData& class_data = class_data_[klass];
      size_t object_sizes = class_data.false_dirty_byte_count;
      float avg_object_size = object_sizes * 1.0f / object_count;
      const std::string& descriptor = class_data.descriptor;
      os_ << "    " << mirror::Class::PrettyClass(klass) << " ("
          << "objects: " << object_count << ", "
          << "avg object size: " << avg_object_size << ", "
          << "total bytes: " << object_sizes << ", "
          << "class descriptor: '" << descriptor << "'"
          << ")\n";
    }
  }

  void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto clean_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.clean_object_count; });
    os_ << "\n" << "  Clean object count by class:\n";
    for (const auto& vk_pair : clean_object_class_values) {
      os_ << "    " << mirror::Class::PrettyClass(vk_pair.second) << " (" << vk_pair.first << ")\n";
    }
  }

 private:
  // Aggregate and detail class data from an image diff.
  struct ClassData {
    size_t dirty_object_count = 0;
    // Track only the byte-per-byte dirtiness (in bytes)
    size_t dirty_object_byte_count = 0;
    // Track the object-by-object dirtiness (in bytes)
    size_t dirty_object_size_in_bytes = 0;
    size_t clean_object_count = 0;
    std::string descriptor;
    size_t false_dirty_byte_count = 0;
    size_t false_dirty_object_count = 0;
    std::vector<mirror::Object*> false_dirty_objects;
    // Remote pointers to dirty objects
    std::vector<mirror::Object*> dirty_objects;

    void AddCleanObject() REQUIRES_SHARED(Locks::mutator_lock_) {
      ++clean_object_count;
    }

    void AddDirtyObject(mirror::Object* object, mirror::Object* object_remote)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      ++dirty_object_count;
      dirty_object_byte_count += CountDirtyBytes(object, object_remote);
      dirty_object_size_in_bytes += EntrySize(object);
      dirty_objects.push_back(object_remote);
    }

    void AddFalseDirtyObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
      ++false_dirty_object_count;
      false_dirty_objects.push_back(object);
      false_dirty_byte_count += EntrySize(object);
    }

   private:
    // Go byte-by-byte and figure out what exactly got dirtied
    static size_t CountDirtyBytes(mirror::Object* object1, mirror::Object* object2)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      const uint8_t* cur1 = reinterpret_cast<const uint8_t*>(object1);
      const uint8_t* cur2 = reinterpret_cast<const uint8_t*>(object2);
      size_t dirty_bytes = 0;
      size_t object_size = EntrySize(object1);
      for (size_t i = 0; i < object_size; ++i) {
        if (cur1[i] != cur2[i]) {
          dirty_bytes++;
        }
      }
      return dirty_bytes;
    }
  };

  std::ostream& os_;
  bool dump_dirty_objects_;
  std::unordered_set<mirror::Object*> dirty_objects_;
  std::map<mirror::Class*, ClassData> class_data_;

  DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
};

// Region analysis for ArtMethods.
class ImgArtMethodVisitor : public ArtMethodVisitor {
 public:
  using ComputeDirtyFunc = std::function<void(ArtMethod*,
                                              const uint8_t*,
                                              const std::set<size_t>&)>;
  ImgArtMethodVisitor(ComputeDirtyFunc dirty_func,
                      const uint8_t* begin_image_ptr,
                      const std::set<size_t>& dirty_pages) :
    dirty_func_(std::move(dirty_func)),
    begin_image_ptr_(begin_image_ptr),
    dirty_pages_(dirty_pages) { }
  ~ImgArtMethodVisitor() override { }
  void Visit(ArtMethod* method) override {
    dirty_func_(method, begin_image_ptr_, dirty_pages_);
  }

 private:
  const ComputeDirtyFunc dirty_func_;
  const uint8_t* begin_image_ptr_;
  const std::set<size_t>& dirty_pages_;
};

// Struct and functor for computing offsets of members of ArtMethods.
// template <typename RegionType>
struct MemberInfo {
  template <typename T>
  void operator() (const ArtMethod* method, const T* member_address, const std::string& name) {
    // Check that member_address is a pointer inside *method.
    DCHECK(reinterpret_cast<uintptr_t>(method) <= reinterpret_cast<uintptr_t>(member_address));
    DCHECK(reinterpret_cast<uintptr_t>(member_address) + sizeof(T) <=
           reinterpret_cast<uintptr_t>(method) + sizeof(ArtMethod));
    size_t offset =
        reinterpret_cast<uintptr_t>(member_address) - reinterpret_cast<uintptr_t>(method);
    offset_to_name_size_.insert({offset, NameAndSize(sizeof(T), name)});
  }

  struct NameAndSize {
    size_t size_;
    std::string name_;
    NameAndSize(size_t size, const std::string& name) : size_(size), name_(name) { }
    NameAndSize() : size_(0), name_("INVALID") { }
  };

  std::map<size_t, NameAndSize> offset_to_name_size_;
};
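
// Note: MemberInfo is applied through ArtMethod::VisitMembers() (see the
// RegionSpecializedBase<ArtMethod> constructor below), which invokes operator() once per member,
// so offset_to_name_size_ ends up mapping each member's byte offset within ArtMethod to its name
// and size. DiffEntryContents() later walks this table to report dirty members by name.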

template<>
class RegionSpecializedBase<ArtMethod> : public RegionCommon<ArtMethod> {
 public:
  RegionSpecializedBase(std::ostream* os,
                        std::vector<uint8_t>* remote_contents,
                        std::vector<uint8_t>* zygote_contents,
                        const backtrace_map_t& boot_map,
                        const ImageHeader& image_header,
                        bool dump_dirty_objects ATTRIBUTE_UNUSED)
      : RegionCommon<ArtMethod>(os, remote_contents, zygote_contents, boot_map, image_header),
        os_(*os) {
    // Prepare the table for offset to member lookups.
    ArtMethod* art_method = reinterpret_cast<ArtMethod*>(&(*remote_contents)[0]);
    art_method->VisitMembers(member_info_);
    // Prepare the table for address to symbolic entry point names.
    BuildEntryPointNames();
    class_linker_ = Runtime::Current()->GetClassLinker();
  }

  // Define a common public type name for use by RegionData.
  using VisitorClass = ImgArtMethodVisitor;

  void VisitEntries(VisitorClass* visitor,
                    uint8_t* base,
                    PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    RegionCommon<ArtMethod>::image_header_.VisitPackedArtMethods(visitor, base, pointer_size);
  }

  void VisitEntry(ArtMethod* method ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
  }

  void AddCleanEntry(ArtMethod* method ATTRIBUTE_UNUSED) {
  }

  void AddFalseDirtyEntry(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    RegionCommon<ArtMethod>::AddFalseDirtyEntry(method);
  }

  void AddDirtyEntry(ArtMethod* method, ArtMethod* method_remote)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t entry_size = EntrySize(method);
    ++different_entries_;
    dirty_entry_bytes_ += entry_size;
    // Increment counts for the fields that are dirty
    const uint8_t* current = reinterpret_cast<const uint8_t*>(method);
    const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(method_remote);
    // ArtMethods always log their dirty count and entries.
    for (size_t i = 0; i < entry_size; ++i) {
      if (current[i] != current_remote[i]) {
        field_dirty_count_[i]++;
      }
    }
    dirty_entries_.push_back(method);
  }

  void DiffEntryContents(ArtMethod* method,
                         uint8_t* remote_bytes,
                         const uint8_t* base_ptr,
                         bool log_dirty_objects ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* tabs = "    ";
    os_ << tabs << "ArtMethod " << ArtMethod::PrettyMethod(method) << "\n";

    std::unordered_set<size_t> dirty_members;
    // Examine the members comprising the ArtMethod, computing which members are dirty.
    for (const std::pair<size_t, MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
      const size_t offset = p.first;
      if (memcmp(base_ptr + offset, remote_bytes + offset, p.second.size_) != 0) {
        dirty_members.insert(p.first);
      }
    }
    // Dump different fields.
    if (!dirty_members.empty()) {
      os_ << tabs << "Dirty members " << dirty_members.size() << "\n";
      for (size_t offset : dirty_members) {
        const MemberInfo::NameAndSize& member_info = member_info_.offset_to_name_size_[offset];
        os_ << tabs << member_info.name_
            << " original=" << StringFromBytes(base_ptr + offset, member_info.size_)
            << " remote=" << StringFromBytes(remote_bytes + offset, member_info.size_)
            << "\n";
      }
    }
    os_ << "\n";
  }

  void DumpDirtyObjects() REQUIRES_SHARED(Locks::mutator_lock_) {
  }

  void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    DumpSamplesAndOffsetCount();
    os_ << "      offset to field map:\n";
    for (const std::pair<size_t, MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
      const size_t offset = p.first;
      const size_t size = p.second.size_;
      os_ << StringPrintf("        %zu-%zu: ", offset, offset + size - 1)
          << p.second.name_
          << std::endl;
    }

    os_ << "      field contents:\n";
    for (ArtMethod* method : dirty_entries_) {
      // remote method
      auto art_method = reinterpret_cast<ArtMethod*>(method);
      // remote class
      ObjPtr<mirror::Class> remote_declaring_class =
          FixUpRemotePointer(art_method->GetDeclaringClass(),
                             *RegionCommon<ArtMethod>::remote_contents_,
                             RegionCommon<ArtMethod>::boot_map_);
      // local class
      ObjPtr<mirror::Class> declaring_class =
          RemoteContentsPointerToLocal(remote_declaring_class,
                                       *RegionCommon<ArtMethod>::remote_contents_,
                                       RegionCommon<ArtMethod>::image_header_);
      DumpOneArtMethod(art_method, declaring_class, remote_declaring_class);
    }
  }

  void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    os_ << "\n" << "  False-dirty ArtMethods\n";
    os_ << "      field contents:\n";
    for (ArtMethod* method : false_dirty_entries_) {
      // local class
      ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
      DumpOneArtMethod(method, declaring_class, nullptr);
    }
  }

  void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
  }

 private:
  std::ostream& os_;
  MemberInfo member_info_;
  std::map<const void*, std::string> entry_point_names_;
  ClassLinker* class_linker_;

  // Compute a map of addresses to names in the boot OAT file(s).
  void BuildEntryPointNames() {
    OatFileManager& oat_file_manager = Runtime::Current()->GetOatFileManager();
    std::vector<const OatFile*> boot_oat_files = oat_file_manager.GetBootOatFiles();
    for (const OatFile* oat_file : boot_oat_files) {
      const OatHeader& oat_header = oat_file->GetOatHeader();
      const void* i2ib = oat_header.GetInterpreterToInterpreterBridge();
      if (i2ib != nullptr) {
        entry_point_names_[i2ib] = "InterpreterToInterpreterBridge (from boot oat file)";
      }
      const void* i2ccb = oat_header.GetInterpreterToCompiledCodeBridge();
      if (i2ccb != nullptr) {
        entry_point_names_[i2ccb] = "InterpreterToCompiledCodeBridge (from boot oat file)";
      }
      const void* jdl = oat_header.GetJniDlsymLookup();
      if (jdl != nullptr) {
        entry_point_names_[jdl] = "JniDlsymLookup (from boot oat file)";
      }
      const void* qgjt = oat_header.GetQuickGenericJniTrampoline();
      if (qgjt != nullptr) {
        entry_point_names_[qgjt] = "QuickGenericJniTrampoline (from boot oat file)";
      }
      const void* qrt = oat_header.GetQuickResolutionTrampoline();
      if (qrt != nullptr) {
        entry_point_names_[qrt] = "QuickResolutionTrampoline (from boot oat file)";
      }
      const void* qict = oat_header.GetQuickImtConflictTrampoline();
      if (qict != nullptr) {
        entry_point_names_[qict] = "QuickImtConflictTrampoline (from boot oat file)";
      }
      const void* q2ib = oat_header.GetQuickToInterpreterBridge();
      if (q2ib != nullptr) {
        entry_point_names_[q2ib] = "QuickToInterpreterBridge (from boot oat file)";
      }
    }
  }

  std::string StringFromBytes(const uint8_t* bytes, size_t size) {
    switch (size) {
      case 1:
        return StringPrintf("%" PRIx8, *bytes);
      case 2:
        return StringPrintf("%" PRIx16, *reinterpret_cast<const uint16_t*>(bytes));
      case 4:
      case 8: {
        // Compute an address if the bytes might contain one.
        uint64_t intval;
        if (size == 4) {
          intval = *reinterpret_cast<const uint32_t*>(bytes);
        } else {
          intval = *reinterpret_cast<const uint64_t*>(bytes);
        }
        const void* addr = reinterpret_cast<const void*>(intval);
        // Match the address against those that have Is* methods in the ClassLinker.
        if (class_linker_->IsQuickToInterpreterBridge(addr)) {
          return "QuickToInterpreterBridge";
        } else if (class_linker_->IsQuickGenericJniStub(addr)) {
          return "QuickGenericJniStub";
        } else if (class_linker_->IsQuickResolutionStub(addr)) {
          return "QuickResolutionStub";
        } else if (class_linker_->IsJniDlsymLookupStub(addr)) {
          return "JniDlsymLookupStub";
        }
        // Match the address against those that we saved from the boot OAT files.
        if (entry_point_names_.find(addr) != entry_point_names_.end()) {
          return entry_point_names_[addr];
        }
        return StringPrintf("%" PRIx64, intval);
      }
      default:
        LOG(WARNING) << "Don't know how to convert " << size << " bytes to integer";
        return "<UNKNOWN>";
    }
  }
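
  // Note: for 4- and 8-byte members the value above is first interpreted as a possible code
  // address: it is checked against the ClassLinker's well-known stubs and against the trampoline
  // addresses collected in BuildEntryPointNames(), and only falls back to printing the raw hex
  // value when it matches none of them. This makes dirty entry point fields readable in the dump.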

  void DumpOneArtMethod(ArtMethod* art_method,
                        ObjPtr<mirror::Class> declaring_class,
                        ObjPtr<mirror::Class> remote_declaring_class)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
    os_ << "        " << reinterpret_cast<const void*>(art_method) << " ";
    os_ << "  entryPointFromJni: "
        << reinterpret_cast<const void*>(art_method->GetDataPtrSize(pointer_size)) << ", ";
    os_ << "  entryPointFromQuickCompiledCode: "
        << reinterpret_cast<const void*>(
               art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size))
        << ", ";
    os_ << "  isNative? " << (art_method->IsNative() ? "yes" : "no") << ", ";
    // Null for runtime methods.
    if (declaring_class != nullptr) {
      os_ << "  class_status (local): " << declaring_class->GetStatus();
    }
    if (remote_declaring_class != nullptr) {
      os_ << ", class_status (remote): " << remote_declaring_class->GetStatus();
    }
    os_ << "\n";
  }

  DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
};

template <typename T>
class RegionData : public RegionSpecializedBase<T> {
 public:
  RegionData(std::ostream* os,
             std::vector<uint8_t>* remote_contents,
             std::vector<uint8_t>* zygote_contents,
             const backtrace_map_t& boot_map,
             const ImageHeader& image_header,
             bool dump_dirty_objects)
      : RegionSpecializedBase<T>(os,
                                 remote_contents,
                                 zygote_contents,
                                 boot_map,
                                 image_header,
                                 dump_dirty_objects),
        os_(*os) {
    CHECK(remote_contents != nullptr);
    CHECK(zygote_contents != nullptr);
  }

  // Walk over the type T entries in the region between begin_image_ptr and end_image_ptr,
  // collecting and reporting data regarding dirty entries, differences, etc.
  void ProcessRegion(const MappingData& mapping_data,
                     RemoteProcesses remotes,
                     const uint8_t* begin_image_ptr)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    typename RegionSpecializedBase<T>::VisitorClass visitor(
        [this](T* entry,
               const uint8_t* begin_image_ptr,
               const std::set<size_t>& dirty_page_set) REQUIRES_SHARED(Locks::mutator_lock_) {
          this->ComputeEntryDirty(entry, begin_image_ptr, dirty_page_set);
        },
        begin_image_ptr,
        mapping_data.dirty_page_set);
    PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
    RegionSpecializedBase<T>::VisitEntries(&visitor,
                                           const_cast<uint8_t*>(begin_image_ptr),
                                           pointer_size);

    // Looking at only dirty pages, figure out how many of those bytes belong to dirty entries.
    // TODO: fix this now that there are multiple regions in a mapping.
    float true_dirtied_percent =
        RegionCommon<T>::GetDirtyEntryBytes() * 1.0f / (mapping_data.dirty_pages * kPageSize);

    // Entry specific statistics.
    os_ << RegionCommon<T>::GetDifferentEntryCount() << " different entries, \n  "
        << RegionCommon<T>::GetDirtyEntryBytes() << " different entry [bytes], \n  "
        << RegionCommon<T>::GetFalseDirtyEntryCount() << " false dirty entries,\n  "
        << RegionCommon<T>::GetFalseDirtyEntryBytes() << " false dirty entry [bytes], \n  "
        << true_dirtied_percent << " different entries-vs-total in a dirty page;\n  "
        << "\n";

    const uint8_t* base_ptr = begin_image_ptr;
    switch (remotes) {
      case RemoteProcesses::kZygoteOnly:
        os_ << "  Zygote shared dirty entries: ";
        break;
      case RemoteProcesses::kImageAndZygote:
        os_ << "  Application dirty entries (private dirty): ";
        // If we are dumping private dirty, diff against the zygote map to make it clearer what
        // fields caused the page to be private dirty.
        base_ptr = &RegionCommon<T>::zygote_contents_->operator[](0);
        break;
      case RemoteProcesses::kImageOnly:
        os_ << "  Application dirty entries (unknown whether private or shared dirty): ";
        break;
    }
    DiffDirtyEntries(ProcessType::kRemote,
                     begin_image_ptr,
                     RegionCommon<T>::remote_contents_,
                     base_ptr,
                     /*log_dirty_objects*/true);
    // Print shared dirty after since it's less important.
    if (RegionCommon<T>::GetZygoteDirtyEntryCount() != 0) {
      // We only reach this point if both pids were specified. Furthermore,
      // entries are only displayed here if they differed in both the image
      // and the zygote, so they are probably private dirty.
      CHECK(remotes == RemoteProcesses::kImageAndZygote);
      os_ << "\n" << "  Zygote dirty entries (probably shared dirty): ";
      DiffDirtyEntries(ProcessType::kZygote,
                       begin_image_ptr,
                       RegionCommon<T>::zygote_contents_,
                       begin_image_ptr,
                       /*log_dirty_objects*/false);
    }
    RegionSpecializedBase<T>::DumpDirtyObjects();
    RegionSpecializedBase<T>::DumpDirtyEntries();
    RegionSpecializedBase<T>::DumpFalseDirtyEntries();
    RegionSpecializedBase<T>::DumpCleanEntries();
  }

 private:
  std::ostream& os_;

  void DiffDirtyEntries(ProcessType process_type,
                        const uint8_t* begin_image_ptr,
                        std::vector<uint8_t>* contents,
                        const uint8_t* base_ptr,
                        bool log_dirty_objects)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    os_ << RegionCommon<T>::dirty_entries_.size() << "\n";
    const std::set<T*>& entries =
        (process_type == ProcessType::kZygote) ?
            RegionCommon<T>::zygote_dirty_entries_:
            RegionCommon<T>::image_dirty_entries_;
    for (T* entry : entries) {
      uint8_t* entry_bytes = reinterpret_cast<uint8_t*>(entry);
      ptrdiff_t offset = entry_bytes - begin_image_ptr;
      uint8_t* remote_bytes = &(*contents)[offset];
      RegionSpecializedBase<T>::DiffEntryContents(entry,
                                                  remote_bytes,
                                                  &base_ptr[offset],
                                                  log_dirty_objects);
    }
  }

  void ComputeEntryDirty(T* entry,
                         const uint8_t* begin_image_ptr,
                         const std::set<size_t>& dirty_pages)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Set up pointers in the remote and the zygote for comparison.
    uint8_t* current = reinterpret_cast<uint8_t*>(entry);
    ptrdiff_t offset = current - begin_image_ptr;
    T* entry_remote =
        reinterpret_cast<T*>(const_cast<uint8_t*>(&(*RegionCommon<T>::remote_contents_)[offset]));
    const bool have_zygote = !RegionCommon<T>::zygote_contents_->empty();
    const uint8_t* current_zygote =
        have_zygote ? &(*RegionCommon<T>::zygote_contents_)[offset] : nullptr;
    T* entry_zygote = reinterpret_cast<T*>(const_cast<uint8_t*>(current_zygote));
    // Visit and classify entries at the current location.
    RegionSpecializedBase<T>::VisitEntry(entry);

    // Test private dirty first.
    bool is_dirty = false;
    if (have_zygote) {
      bool private_dirty = EntriesDiffer(entry_zygote, entry_remote);
      if (private_dirty) {
        // Private dirty, app vs zygote.
        is_dirty = true;
        RegionCommon<T>::AddImageDirtyEntry(entry);
      }
      if (EntriesDiffer(entry_zygote, entry)) {
        // Shared dirty, zygote vs image.
        is_dirty = true;
        RegionCommon<T>::AddZygoteDirtyEntry(entry);
      }
    } else if (EntriesDiffer(entry_remote, entry)) {
      // Shared or private dirty, app vs image.
      is_dirty = true;
      RegionCommon<T>::AddImageDirtyEntry(entry);
    }
    if (is_dirty) {
      // TODO: Add support for dirty entries in both zygote and image.
      RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
    } else {
      RegionSpecializedBase<T>::AddCleanEntry(entry);
      if (RegionCommon<T>::IsEntryOnDirtyPage(entry, dirty_pages)) {
        // This entry was either never mutated or got mutated back to the same value.
        // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
        RegionSpecializedBase<T>::AddFalseDirtyEntry(entry);
      }
    }
  }

  DISALLOW_COPY_AND_ASSIGN(RegionData);
};

}  // namespace

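
// Top-level driver: Init() locates the boot image mapping in the target process and opens the
// /proc files needed for the comparison; Dump() prints basic image header information and then
// dispatches to the page- and entry-level diff (DumpImageDiff).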
class ImgDiagDumper {
 public:
  explicit ImgDiagDumper(std::ostream* os,
                         const ImageHeader& image_header,
                         const std::string& image_location,
                         pid_t image_diff_pid,
                         pid_t zygote_diff_pid,
                         bool dump_dirty_objects)
      : os_(os),
        image_header_(image_header),
        image_location_(image_location),
        image_diff_pid_(image_diff_pid),
        zygote_diff_pid_(zygote_diff_pid),
        dump_dirty_objects_(dump_dirty_objects),
        zygote_pid_only_(false) {}

  bool Init() {
    std::ostream& os = *os_;

    if (image_diff_pid_ < 0 && zygote_diff_pid_ < 0) {
      os << "Either --image-diff-pid or --zygote-diff-pid (or both) must be specified.\n";
      return false;
    }

    // To avoid the combinations of command-line argument use cases:
    // If the user invoked with only --zygote-diff-pid, shuffle that to
    // image_diff_pid_, invalidate zygote_diff_pid_, and remember that
    // image_diff_pid_ is now special.
    if (image_diff_pid_ < 0) {
      image_diff_pid_ = zygote_diff_pid_;
      zygote_diff_pid_ = -1;
      zygote_pid_only_ = true;
    }

    {
      struct stat sts;
      std::string proc_pid_str =
          StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
      if (stat(proc_pid_str.c_str(), &sts) == -1) {
        os << "Process does not exist";
        return false;
      }
    }

    // Open /proc/$pid/maps to view memory maps
    auto tmp_proc_maps = std::unique_ptr<BacktraceMap>(BacktraceMap::Create(image_diff_pid_));
    if (tmp_proc_maps == nullptr) {
      os << "Could not read backtrace maps";
      return false;
    }

    bool found_boot_map = false;
    // Find the memory map only for boot.art
    for (const backtrace_map_t* map : *tmp_proc_maps) {
      if (EndsWith(map->name, GetImageLocationBaseName())) {
        if ((map->flags & PROT_WRITE) != 0) {
          boot_map_ = *map;
          found_boot_map = true;
          break;
        }
        // In actuality there's more than 1 map, but the second one is read-only.
        // The one we care about is the write-able map.
        // The readonly maps are guaranteed to be identical, so it's not interesting to compare
        // them.
      }
    }

    if (!found_boot_map) {
      os << "Could not find map for " << GetImageLocationBaseName();
      return false;
    }
    // Sanity check boot_map_.
    CHECK(boot_map_.end >= boot_map_.start);
    boot_map_size_ = boot_map_.end - boot_map_.start;

    // Open /proc/<image_diff_pid_>/mem and read as remote_contents_.
    std::string image_file_name =
        StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
    auto image_map_file = std::unique_ptr<File>(OS::OpenFileForReading(image_file_name.c_str()));
    if (image_map_file == nullptr) {
      os << "Failed to open " << image_file_name << " for reading";
      return false;
    }
    std::vector<uint8_t> tmp_remote_contents(boot_map_size_);
    if (!image_map_file->PreadFully(&tmp_remote_contents[0], boot_map_size_, boot_map_.start)) {
      os << "Could not fully read file " << image_file_name;
      return false;
    }

    // If zygote_diff_pid_ != -1, open /proc/<zygote_diff_pid_>/mem and read as zygote_contents_.
    std::vector<uint8_t> tmp_zygote_contents;
    if (zygote_diff_pid_ != -1) {
      std::string zygote_file_name =
          StringPrintf("/proc/%ld/mem", static_cast<long>(zygote_diff_pid_));  // NOLINT [runtime/int]
      std::unique_ptr<File> zygote_map_file(OS::OpenFileForReading(zygote_file_name.c_str()));
      if (zygote_map_file == nullptr) {
        os << "Failed to open " << zygote_file_name << " for reading";
        return false;
      }
      // The boot map should be at the same address.
      tmp_zygote_contents.resize(boot_map_size_);
      if (!zygote_map_file->PreadFully(&tmp_zygote_contents[0], boot_map_size_, boot_map_.start)) {
        LOG(WARNING) << "Could not fully read zygote file " << zygote_file_name;
        return false;
      }
    }

    // Open /proc/<image_diff_pid_>/pagemap.
    std::string pagemap_file_name = StringPrintf(
        "/proc/%ld/pagemap", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
    auto tmp_pagemap_file =
        std::unique_ptr<File>(OS::OpenFileForReading(pagemap_file_name.c_str()));
    if (tmp_pagemap_file == nullptr) {
      os << "Failed to open " << pagemap_file_name << " for reading: " << strerror(errno);
      return false;
    }

    // Not truly clean, mmap-ing boot.art again would be more pristine, but close enough
    const char* clean_pagemap_file_name = "/proc/self/pagemap";
    auto tmp_clean_pagemap_file = std::unique_ptr<File>(
        OS::OpenFileForReading(clean_pagemap_file_name));
    if (tmp_clean_pagemap_file == nullptr) {
      os << "Failed to open " << clean_pagemap_file_name << " for reading: " << strerror(errno);
      return false;
    }

    auto tmp_kpageflags_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpageflags"));
    if (tmp_kpageflags_file == nullptr) {
      os << "Failed to open /proc/kpageflags for reading: " << strerror(errno);
      return false;
    }

    auto tmp_kpagecount_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpagecount"));
    if (tmp_kpagecount_file == nullptr) {
      os << "Failed to open /proc/kpagecount for reading:" << strerror(errno);
      return false;
    }

    // Commit the mappings, etc.
    proc_maps_ = std::move(tmp_proc_maps);
    remote_contents_ = std::move(tmp_remote_contents);
    zygote_contents_ = std::move(tmp_zygote_contents);
    pagemap_file_ = std::move(*tmp_pagemap_file.release());
    clean_pagemap_file_ = std::move(*tmp_clean_pagemap_file.release());
    kpageflags_file_ = std::move(*tmp_kpageflags_file.release());
    kpagecount_file_ = std::move(*tmp_kpagecount_file.release());

    return true;
  }

  bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
    std::ostream& os = *os_;
    os << "IMAGE LOCATION: " << image_location_ << "\n\n";

    os << "MAGIC: " << image_header_.GetMagic() << "\n\n";

    os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n";

    PrintPidLine("IMAGE", image_diff_pid_);
    os << "\n\n";
    PrintPidLine("ZYGOTE", zygote_diff_pid_);
    bool ret = true;
    if (image_diff_pid_ >= 0 || zygote_diff_pid_ >= 0) {
      ret = DumpImageDiff();
      os << "\n\n";
    }

    os << std::flush;

    return ret;
  }

 private:
  bool DumpImageDiff()
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return DumpImageDiffMap();
  }

  bool ComputeDirtyBytes(const uint8_t* image_begin, MappingData* mapping_data /*out*/) {
    std::ostream& os = *os_;

    size_t virtual_page_idx = 0;   // Virtual page number (for an absolute memory address)
    size_t page_idx = 0;           // Page index relative to 0
    size_t previous_page_idx = 0;  // Previous page index relative to 0


    // Iterate through one page at a time. Boot map begin/end already implicitly aligned.
    for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; begin += kPageSize) {
      ptrdiff_t offset = begin - boot_map_.start;

      // We treat the image header as part of the memory map for now
      // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
      // But it might still be interesting to see if any of the ImageHeader data mutated
      const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
      uint8_t* remote_ptr = &remote_contents_[offset];

      if (memcmp(local_ptr, remote_ptr, kPageSize) != 0) {
        mapping_data->different_pages++;

        // Count the number of 32-bit integers that are different.
        for (size_t i = 0; i < kPageSize / sizeof(uint32_t); ++i) {
          uint32_t* remote_ptr_int32 = reinterpret_cast<uint32_t*>(remote_ptr);
          const uint32_t* local_ptr_int32 = reinterpret_cast<const uint32_t*>(local_ptr);

          if (remote_ptr_int32[i] != local_ptr_int32[i]) {
            mapping_data->different_int32s++;
          }
        }
      }
    }
1317
Mathieu Chartier728f8502017-07-28 17:35:30 -07001318 std::vector<size_t> private_dirty_pages_for_section(ImageHeader::kSectionCount, 0u);
1319
David Sehr50005a02017-06-21 13:24:21 -07001320 // Iterate through one byte at a time.
1321 ptrdiff_t page_off_begin = image_header_.GetImageBegin() - image_begin;
1322 for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; ++begin) {
1323 previous_page_idx = page_idx;
1324 ptrdiff_t offset = begin - boot_map_.start;
1325
1326 // We treat the image header as part of the memory map for now
1327 // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
1328 // But it might still be interesting to see if any of the ImageHeader data mutated
1329 const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
1330 uint8_t* remote_ptr = &remote_contents_[offset];
1331
1332 virtual_page_idx = reinterpret_cast<uintptr_t>(local_ptr) / kPageSize;
1333
1334 // Calculate the page index, relative to the 0th page where the image begins
1335 page_idx = (offset + page_off_begin) / kPageSize;
1336 if (*local_ptr != *remote_ptr) {
1337 // Track number of bytes that are different
David Sehrb4005f02017-06-20 19:11:40 -07001338 mapping_data->different_bytes++;
David Sehr50005a02017-06-21 13:24:21 -07001339 }
1340
1341 // Independently count the # of dirty pages on the remote side
1342 size_t remote_virtual_page_idx = begin / kPageSize;
1343 if (previous_page_idx != page_idx) {
1344 uint64_t page_count = 0xC0FFEE;
1345 // TODO: virtual_page_idx needs to be from the same process
1346 std::string error_msg;
1347 int dirtiness = (IsPageDirty(&pagemap_file_, // Image-diff-pid procmap
1348 &clean_pagemap_file_, // Self procmap
1349 &kpageflags_file_,
1350 &kpagecount_file_,
1351 remote_virtual_page_idx, // potentially "dirty" page
1352 virtual_page_idx, // true "clean" page
1353 &page_count,
1354 &error_msg));
1355 if (dirtiness < 0) {
1356 os << error_msg;
1357 return false;
1358 } else if (dirtiness > 0) {
David Sehrb4005f02017-06-20 19:11:40 -07001359 mapping_data->dirty_pages++;
1360 mapping_data->dirty_page_set.insert(mapping_data->dirty_page_set.end(), virtual_page_idx);
David Sehr50005a02017-06-21 13:24:21 -07001361 }
1362
1363 bool is_dirty = dirtiness > 0;
1364 bool is_private = page_count == 1;
1365
1366 if (is_private) {
David Sehrb4005f02017-06-20 19:11:40 -07001367 mapping_data->private_pages++;
David Sehr50005a02017-06-21 13:24:21 -07001368 }
1369
1370 if (is_dirty && is_private) {
David Sehrb4005f02017-06-20 19:11:40 -07001371 mapping_data->private_dirty_pages++;
Mathieu Chartier728f8502017-07-28 17:35:30 -07001372 for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
1373 const ImageHeader::ImageSections section = static_cast<ImageHeader::ImageSections>(i);
1374 if (image_header_.GetImageSection(section).Contains(offset)) {
1375 ++private_dirty_pages_for_section[i];
1376 }
1377 }
David Sehr50005a02017-06-21 13:24:21 -07001378 }
1379 }
1380 }
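// "False dirty" pages are reported as dirty by the kernel but are byte-for-byte
// identical to the clean image, i.e. they were touched without a lasting change and
// could in principle still be shared.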
David Sehrb4005f02017-06-20 19:11:40 -07001381 mapping_data->false_dirty_pages = mapping_data->dirty_pages - mapping_data->different_pages;
1382 // Print low-level (bytes, int32s, pages) statistics.
1383 os << mapping_data->different_bytes << " differing bytes,\n "
1384 << mapping_data->different_int32s << " differing int32s,\n "
1385 << mapping_data->different_pages << " differing pages,\n "
1386 << mapping_data->dirty_pages << " pages are dirty;\n "
1387 << mapping_data->false_dirty_pages << " pages are false dirty;\n "
1388 << mapping_data->private_pages << " pages are private;\n "
Mathieu Chartier728f8502017-07-28 17:35:30 -07001389 << mapping_data->private_dirty_pages << " pages are Private_Dirty\n "
1390 << "\n";
1391
1392 size_t total_private_dirty_pages = std::accumulate(private_dirty_pages_for_section.begin(),
1393 private_dirty_pages_for_section.end(),
1394 0u);
1395 os << "Image sections (total private dirty pages " << total_private_dirty_pages << ")\n";
1396 for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
1397 const ImageHeader::ImageSections section = static_cast<ImageHeader::ImageSections>(i);
1398 os << section << " " << image_header_.GetImageSection(section)
1399 << " private dirty pages=" << private_dirty_pages_for_section[i] << "\n";
1400 }
1401 os << "\n";
David Sehrb4005f02017-06-20 19:11:40 -07001402
David Sehr50005a02017-06-21 13:24:21 -07001403 return true;
1404 }
1405
David Sehr50005a02017-06-21 13:24:21 -07001406 // Look at /proc/$pid/mem and only diff the things from there
1407 bool DumpImageDiffMap()
David Sehrb4005f02017-06-20 19:11:40 -07001408 REQUIRES_SHARED(Locks::mutator_lock_) {
David Sehr50005a02017-06-21 13:24:21 -07001409 std::ostream& os = *os_;
Igor Murashkin37743352014-11-13 14:38:00 -08001410 std::string error_msg;
1411
1412 // Walk the bytes and diff against our boot image
Igor Murashkin37743352014-11-13 14:38:00 -08001413 os << "\nObserving boot image header at address "
David Sehr50005a02017-06-21 13:24:21 -07001414 << reinterpret_cast<const void*>(&image_header_)
Igor Murashkin37743352014-11-13 14:38:00 -08001415 << "\n\n";
1416
David Sehr50005a02017-06-21 13:24:21 -07001417 const uint8_t* image_begin_unaligned = image_header_.GetImageBegin();
David Sehr50005a02017-06-21 13:24:21 -07001418 const uint8_t* image_end_unaligned = image_begin_unaligned + image_header_.GetImageSize();
Igor Murashkin37743352014-11-13 14:38:00 -08001419
1420 // Adjust range to nearest page
1421 const uint8_t* image_begin = AlignDown(image_begin_unaligned, kPageSize);
1422 const uint8_t* image_end = AlignUp(image_end_unaligned, kPageSize);
1423
David Sehr50005a02017-06-21 13:24:21 -07001424 if (reinterpret_cast<uintptr_t>(image_begin) > boot_map_.start ||
1425 reinterpret_cast<uintptr_t>(image_end) < boot_map_.end) {
Igor Murashkin37743352014-11-13 14:38:00 -08001426 // Sanity check that we aren't trying to read a completely different boot image
1427 os << "Remote boot map is out of range of local boot map: " <<
1428 "local begin " << reinterpret_cast<const void*>(image_begin) <<
1429 ", local end " << reinterpret_cast<const void*>(image_end) <<
David Sehr50005a02017-06-21 13:24:21 -07001430 ", remote begin " << reinterpret_cast<const void*>(boot_map_.start) <<
1431 ", remote end " << reinterpret_cast<const void*>(boot_map_.end);
Igor Murashkin37743352014-11-13 14:38:00 -08001432 return false;
1433 // If we wanted even more validation we could map the ImageHeader from the file
1434 }
1435
David Sehrb4005f02017-06-20 19:11:40 -07001436 MappingData mapping_data;
David Sehr45de57f2017-06-21 05:03:22 +00001437
David Sehrb4005f02017-06-20 19:11:40 -07001438 os << "Mapping at [" << reinterpret_cast<void*>(boot_map_.start) << ", "
1439 << reinterpret_cast<void*>(boot_map_.end) << ") had:\n ";
1440 if (!ComputeDirtyBytes(image_begin, &mapping_data)) {
David Sehr50005a02017-06-21 13:24:21 -07001441 return false;
Igor Murashkin37743352014-11-13 14:38:00 -08001442 }
David Sehrb4005f02017-06-20 19:11:40 -07001443 RemoteProcesses remotes;
David Sehr20e271a2017-06-14 13:02:14 -07001444 if (zygote_pid_only_) {
David Sehrb4005f02017-06-20 19:11:40 -07001445 remotes = RemoteProcesses::kZygoteOnly;
1446 } else if (zygote_diff_pid_ > 0) {
1447 remotes = RemoteProcesses::kImageAndZygote;
David Sehr20e271a2017-06-14 13:02:14 -07001448 } else {
David Sehrb4005f02017-06-20 19:11:40 -07001449 remotes = RemoteProcesses::kImageOnly;
Mathieu Chartiercb044bc2016-04-01 13:56:41 -07001450 }
1451
David Sehra49e0532017-08-25 08:05:29 -07001452 // Check all the mirror::Object entries in the image.
1453 RegionData<mirror::Object> object_region_data(os_,
1454 &remote_contents_,
1455 &zygote_contents_,
1456 boot_map_,
1457 image_header_,
1458 dump_dirty_objects_);
David Sehrb4005f02017-06-20 19:11:40 -07001459 object_region_data.ProcessRegion(mapping_data,
1460 remotes,
David Sehra49e0532017-08-25 08:05:29 -07001461 image_begin_unaligned);
Igor Murashkin37743352014-11-13 14:38:00 -08001462
David Sehra49e0532017-08-25 08:05:29 -07001463 // Check all the ArtMethod entries in the image.
1464 RegionData<ArtMethod> artmethod_region_data(os_,
1465 &remote_contents_,
1466 &zygote_contents_,
1467 boot_map_,
1468 image_header_,
1469 dump_dirty_objects_);
1470 artmethod_region_data.ProcessRegion(mapping_data,
1471 remotes,
1472 image_begin_unaligned);
Igor Murashkin37743352014-11-13 14:38:00 -08001473 return true;
1474 }
1475
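// /proc/<pid>/pagemap contains one 64-bit entry per virtual page, indexed by virtual
// page number; bits 0-54 hold the page frame number (PFN) and bit 55 the soft-dirty
// flag (see Documentation/vm/pagemap.txt). Reads the entry for |virtual_page_index|
// and extracts the PFN into |page_frame_number|.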
Igor Murashkin37743352014-11-13 14:38:00 -08001476 static bool GetPageFrameNumber(File* page_map_file,
1477 size_t virtual_page_index,
1478 uint64_t* page_frame_number,
1479 std::string* error_msg) {
1480 CHECK(page_map_file != nullptr);
1481 CHECK(page_frame_number != nullptr);
1482 CHECK(error_msg != nullptr);
1483
1484 constexpr size_t kPageMapEntrySize = sizeof(uint64_t);
1485 constexpr uint64_t kPageFrameNumberMask = (1ULL << 55) - 1; // bits 0-54 [in /proc/$pid/pagemap]
1486 constexpr uint64_t kPageSoftDirtyMask = (1ULL << 55); // bit 55 [in /proc/$pid/pagemap]
1487
1488 uint64_t page_map_entry = 0;
1489
1490 // Read 64-bit entry from /proc/$pid/pagemap to get the physical page frame number
1491 if (!page_map_file->PreadFully(&page_map_entry, kPageMapEntrySize,
1492 virtual_page_index * kPageMapEntrySize)) {
1493 *error_msg = StringPrintf("Failed to read the virtual page index entry from %s",
1494 page_map_file->GetPath().c_str());
1495 return false;
1496 }
1497
1498 // TODO: seems useless, remove this.
1499 bool soft_dirty = (page_map_entry & kPageSoftDirtyMask) != 0;
1500 if ((false)) {
1501 LOG(VERBOSE) << soft_dirty; // Suppress unused warning
1502 UNREACHABLE();
1503 }
1504
1505 *page_frame_number = page_map_entry & kPageFrameNumberMask;
1506
1507 return true;
1508 }
1509
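// Returns 1 if the remote page and the corresponding page of the clean local mapping
// are backed by different page frames (the remote page no longer shares the pristine
// file-backed frame), 0 if they share the same frame, and -1 on error with
// |error_msg| set. |page_count| receives the frame's mapping count from
// /proc/kpagecount.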
1510 static int IsPageDirty(File* page_map_file,
David Sehr50005a02017-06-21 13:24:21 -07001511 File* clean_pagemap_file,
1512 File* kpageflags_file,
1513 File* kpagecount_file,
Igor Murashkin37743352014-11-13 14:38:00 -08001514 size_t virtual_page_idx,
1515 size_t clean_virtual_page_idx,
1516 // Out parameters:
1517 uint64_t* page_count, std::string* error_msg) {
1518 CHECK(page_map_file != nullptr);
David Sehr50005a02017-06-21 13:24:21 -07001519 CHECK(clean_pagemap_file != nullptr);
1520 CHECK_NE(page_map_file, clean_pagemap_file);
1521 CHECK(kpageflags_file != nullptr);
1522 CHECK(kpagecount_file != nullptr);
Igor Murashkin37743352014-11-13 14:38:00 -08001523 CHECK(page_count != nullptr);
1524 CHECK(error_msg != nullptr);
1525
1526 // Constants are from https://www.kernel.org/doc/Documentation/vm/pagemap.txt
1527
1528 constexpr size_t kPageFlagsEntrySize = sizeof(uint64_t);
1529 constexpr size_t kPageCountEntrySize = sizeof(uint64_t);
1530 constexpr uint64_t kPageFlagsDirtyMask = (1ULL << 4); // in /proc/kpageflags
1531 constexpr uint64_t kPageFlagsNoPageMask = (1ULL << 20); // in /proc/kpageflags
1532 constexpr uint64_t kPageFlagsMmapMask = (1ULL << 11); // in /proc/kpageflags
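// These masks correspond to KPF_DIRTY (bit 4), KPF_MMAP (bit 11) and KPF_NOPAGE
// (bit 20) of the kernel's kpageflags interface.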
1533
1534 uint64_t page_frame_number = 0;
1535 if (!GetPageFrameNumber(page_map_file, virtual_page_idx, &page_frame_number, error_msg)) {
1536 return -1;
1537 }
1538
1539 uint64_t page_frame_number_clean = 0;
David Sehr50005a02017-06-21 13:24:21 -07001540 if (!GetPageFrameNumber(clean_pagemap_file, clean_virtual_page_idx, &page_frame_number_clean,
Igor Murashkin37743352014-11-13 14:38:00 -08001541 error_msg)) {
1542 return -1;
1543 }
1544
1545 // Read 64-bit entry from /proc/kpageflags to get the dirty bit for a page
1546 uint64_t kpage_flags_entry = 0;
David Sehr50005a02017-06-21 13:24:21 -07001547 if (!kpageflags_file->PreadFully(&kpage_flags_entry,
Igor Murashkin37743352014-11-13 14:38:00 -08001548 kPageFlagsEntrySize,
1549 page_frame_number * kPageFlagsEntrySize)) {
1550 *error_msg = StringPrintf("Failed to read the page flags from %s",
David Sehr50005a02017-06-21 13:24:21 -07001551 kpageflags_file->GetPath().c_str());
Igor Murashkin37743352014-11-13 14:38:00 -08001552 return -1;
1553 }
1554
1555 // Read 64-bit entry from /proc/kpagecount to get mapping counts for a page
David Sehr50005a02017-06-21 13:24:21 -07001556 if (!kpagecount_file->PreadFully(page_count /*out*/,
Igor Murashkin37743352014-11-13 14:38:00 -08001557 kPageCountEntrySize,
1558 page_frame_number * kPageCountEntrySize)) {
1559 *error_msg = StringPrintf("Failed to read the page count from %s",
David Sehr50005a02017-06-21 13:24:21 -07001560 kpagecount_file->GetPath().c_str());
Igor Murashkin37743352014-11-13 14:38:00 -08001561 return -1;
1562 }
1563
1564 // There must be a page frame at the requested address.
1565 CHECK_EQ(kpage_flags_entry & kPageFlagsNoPageMask, 0u);
1566 // The page frame must be memory mapped
1567 CHECK_NE(kpage_flags_entry & kPageFlagsMmapMask, 0u);
1568
1569 // Page is dirty, i.e. has diverged from file, if the 4th bit is set to 1
1570 bool flags_dirty = (kpage_flags_entry & kPageFlagsDirtyMask) != 0;
1571
1572 // page_frame_number_clean must come from the *same* process
1573 // but a *different* mmap than page_frame_number
1574 if (flags_dirty) {
1575 CHECK_NE(page_frame_number, page_frame_number_clean);
1576 }
1577
1578 return page_frame_number != page_frame_number_clean;
1579 }
1580
David Sehr50005a02017-06-21 13:24:21 -07001581 void PrintPidLine(const std::string& kind, pid_t pid) {
1582 if (pid < 0) {
1583 *os_ << kind << " DIFF PID: disabled\n\n";
1584 } else {
1585 *os_ << kind << " DIFF PID (" << pid << "): ";
1586 }
1587 }
1588
1589 static bool EndsWith(const std::string& str, const std::string& suffix) {
1590 return str.size() >= suffix.size() &&
1591 str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
1592 }
1593
1594 // Return suffix of the file path after the last /. (e.g. /foo/bar -> bar, bar -> bar)
1595 static std::string BaseName(const std::string& str) {
1596 size_t idx = str.rfind('/');
1597 if (idx == std::string::npos) {
1598 return str;
1599 }
1600
1601 return str.substr(idx + 1);
1602 }
1603
Igor Murashkin37743352014-11-13 14:38:00 -08001604 // Return the image location, stripped of any directories, e.g. "boot.art" or "core.art"
1605 std::string GetImageLocationBaseName() const {
1606 return BaseName(std::string(image_location_));
1607 }
1608
1609 std::ostream* os_;
1610 const ImageHeader& image_header_;
Andreas Gampe8994a042015-12-30 19:03:17 +00001611 const std::string image_location_;
Igor Murashkin37743352014-11-13 14:38:00 -08001612 pid_t image_diff_pid_; // Dump image diff against boot.art if pid is non-negative
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001613 pid_t zygote_diff_pid_; // Dump image diff against zygote boot.art if pid is non-negative
Jeff Haoc23b0c02017-07-27 18:19:38 -07001614 bool dump_dirty_objects_; // Adds dumping of objects that are dirty.
David Sehr20e271a2017-06-14 13:02:14 -07001615 bool zygote_pid_only_; // The user only specified a pid for the zygote.
Igor Murashkin37743352014-11-13 14:38:00 -08001616
David Sehr50005a02017-06-21 13:24:21 -07001617 // BacktraceMap used for finding the memory mapping of the image file.
1618 std::unique_ptr<BacktraceMap> proc_maps_;
1619 // Boot image mapping.
Igor Murashkin5573c372017-11-16 13:34:30 -08001620 backtrace_map_t boot_map_{};
David Sehr50005a02017-06-21 13:24:21 -07001621 // The size of the boot image mapping.
1622 size_t boot_map_size_;
1623 // The contents of the boot image mapping in the <image_diff_pid_> process.
1624 std::vector<uint8_t> remote_contents_;
1625 // The contents of the boot image mapping in the <zygote_diff_pid_> process.
1626 std::vector<uint8_t> zygote_contents_;
1627 // A File for reading /proc/<image_diff_pid_>/pagemap.
1628 File pagemap_file_;
1629 // A File for reading /proc/self/pagemap.
1630 File clean_pagemap_file_;
1631 // A File for reading /proc/kpageflags.
1632 File kpageflags_file_;
1633 // A File for reading /proc/kpagecount.
1634 File kpagecount_file_;
1635
Igor Murashkin37743352014-11-13 14:38:00 -08001636 DISALLOW_COPY_AND_ASSIGN(ImgDiagDumper);
1637};
1638
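// Diffs every boot image space against the given image/zygote processes and returns
// EXIT_SUCCESS only if all spaces were dumped successfully.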
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001639static int DumpImage(Runtime* runtime,
1640 std::ostream* os,
1641 pid_t image_diff_pid,
Jeff Haoc23b0c02017-07-27 18:19:38 -07001642 pid_t zygote_diff_pid,
1643 bool dump_dirty_objects) {
Igor Murashkin37743352014-11-13 14:38:00 -08001644 ScopedObjectAccess soa(Thread::Current());
1645 gc::Heap* heap = runtime->GetHeap();
Jeff Haodcdc85b2015-12-04 14:06:18 -08001646 std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
1647 CHECK(!image_spaces.empty());
1648 for (gc::space::ImageSpace* image_space : image_spaces) {
1649 const ImageHeader& image_header = image_space->GetImageHeader();
1650 if (!image_header.IsValid()) {
1651 fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
1652 return EXIT_FAILURE;
1653 }
1654
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001655 ImgDiagDumper img_diag_dumper(os,
1656 image_header,
1657 image_space->GetImageLocation(),
1658 image_diff_pid,
Jeff Haoc23b0c02017-07-27 18:19:38 -07001659 zygote_diff_pid,
1660 dump_dirty_objects);
David Sehr50005a02017-06-21 13:24:21 -07001661 if (!img_diag_dumper.Init()) {
1662 return EXIT_FAILURE;
1663 }
Jeff Haodcdc85b2015-12-04 14:06:18 -08001664 if (!img_diag_dumper.Dump()) {
1665 return EXIT_FAILURE;
1666 }
Igor Murashkin37743352014-11-13 14:38:00 -08001667 }
Jeff Haodcdc85b2015-12-04 14:06:18 -08001668 return EXIT_SUCCESS;
Igor Murashkin37743352014-11-13 14:38:00 -08001669}
1670
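// Command-line handling for imgdiag: extends the common CmdlineArgs with
// --image-diff-pid, --zygote-diff-pid and --dump-dirty-objects, and verifies that
// the target process exists and matches the runtime ISA.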
1671struct ImgDiagArgs : public CmdlineArgs {
1672 protected:
1673 using Base = CmdlineArgs;
1674
Roland Levillainf73caca2018-08-24 17:19:07 +01001675 ParseStatus ParseCustom(const StringPiece& option, std::string* error_msg) override {
Igor Murashkin37743352014-11-13 14:38:00 -08001676 {
1677 ParseStatus base_parse = Base::ParseCustom(option, error_msg);
1678 if (base_parse != kParseUnknownArgument) {
1679 return base_parse;
1680 }
1681 }
1682
1683 if (option.starts_with("--image-diff-pid=")) {
1684 const char* image_diff_pid = option.substr(strlen("--image-diff-pid=")).data();
1685
Andreas Gampef9411702018-09-06 17:16:57 -07001686 if (!android::base::ParseInt(image_diff_pid, &image_diff_pid_)) {
Igor Murashkin37743352014-11-13 14:38:00 -08001687 *error_msg = "Image diff pid out of range";
1688 return kParseError;
1689 }
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001690 } else if (option.starts_with("--zygote-diff-pid=")) {
1691 const char* zygote_diff_pid = option.substr(strlen("--zygote-diff-pid=")).data();
1692
Andreas Gampef9411702018-09-06 17:16:57 -07001693 if (!android::base::ParseInt(zygote_diff_pid, &zygote_diff_pid_)) {
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001694 *error_msg = "Zygote diff pid out of range";
1695 return kParseError;
1696 }
Jeff Haoc23b0c02017-07-27 18:19:38 -07001697 } else if (option == "--dump-dirty-objects") {
1698 dump_dirty_objects_ = true;
Igor Murashkin37743352014-11-13 14:38:00 -08001699 } else {
1700 return kParseUnknownArgument;
1701 }
1702
1703 return kParseOk;
1704 }
1705
Roland Levillainf73caca2018-08-24 17:19:07 +01001706 ParseStatus ParseChecks(std::string* error_msg) override {
Igor Murashkin37743352014-11-13 14:38:00 -08001707 // Perform the parent checks.
1708 ParseStatus parent_checks = Base::ParseChecks(error_msg);
1709 if (parent_checks != kParseOk) {
1710 return parent_checks;
1711 }
1712
1713 // Perform our own checks.
1714
1715 if (kill(image_diff_pid_,
1716 /*sig*/0) != 0) { // No signal is sent, perform error-checking only.
1717 // Check if the pid exists before proceeding.
1718 if (errno == ESRCH) {
1719 *error_msg = "Process specified does not exist";
1720 } else {
1721 *error_msg = StringPrintf("Failed to check process status: %s", strerror(errno));
1722 }
1723 return kParseError;
Andreas Gampe8fae4b52017-09-27 20:04:47 -07001724 } else if (instruction_set_ != InstructionSet::kNone && instruction_set_ != kRuntimeISA) {
Igor Murashkin37743352014-11-13 14:38:00 -08001725 // Don't allow different ISAs since the images are ISA-specific.
1726 // Right now the code assumes both the runtime ISA and the remote ISA are identical.
1727 *error_msg = "Must use the default runtime ISA; changing ISA is not supported.";
1728 return kParseError;
1729 }
1730
1731 return kParseOk;
1732 }
1733
Andreas Gampefa6a1b02018-09-07 08:11:55 -07001734 std::string GetUsage() const override {
Igor Murashkin37743352014-11-13 14:38:00 -08001735 std::string usage;
1736
1737 usage +=
1738 "Usage: imgdiag [options] ...\n"
1739 " Example: imgdiag --image-diff-pid=$(pidof dex2oat)\n"
1740 " Example: adb shell imgdiag --image-diff-pid=$(pid zygote)\n"
1741 "\n";
1742
1743 usage += Base::GetUsage();
1744
1745 usage += // Optional.
1746 " --image-diff-pid=<pid>: provide the PID of a process whose boot.art you want to diff.\n"
1747 " Example: --image-diff-pid=$(pid zygote)\n"
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001748 " --zygote-diff-pid=<pid>: provide the PID of the zygote whose boot.art you want to diff "
1749 "against.\n"
1750 " Example: --zygote-diff-pid=$(pid zygote)\n"
Jeff Haoc23b0c02017-07-27 18:19:38 -07001751 " --dump-dirty-objects: additionally output dirty objects of interest.\n"
Igor Murashkin37743352014-11-13 14:38:00 -08001752 "\n";
1753
1754 return usage;
1755 }
1756
1757 public:
1758 pid_t image_diff_pid_ = -1;
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001759 pid_t zygote_diff_pid_ = -1;
Jeff Haoc23b0c02017-07-27 18:19:38 -07001760 bool dump_dirty_objects_ = false;
Igor Murashkin37743352014-11-13 14:38:00 -08001761};
1762
1763struct ImgDiagMain : public CmdlineMain<ImgDiagArgs> {
Andreas Gampefa6a1b02018-09-07 08:11:55 -07001764 bool ExecuteWithRuntime(Runtime* runtime) override {
Igor Murashkin37743352014-11-13 14:38:00 -08001765 CHECK(args_ != nullptr);
1766
1767 return DumpImage(runtime,
Igor Murashkin37743352014-11-13 14:38:00 -08001768 args_->os_,
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001769 args_->image_diff_pid_,
Jeff Haoc23b0c02017-07-27 18:19:38 -07001770 args_->zygote_diff_pid_,
1771 args_->dump_dirty_objects_) == EXIT_SUCCESS;
Igor Murashkin37743352014-11-13 14:38:00 -08001772 }
1773};
1774
1775} // namespace art
1776
1777int main(int argc, char** argv) {
1778 art::ImgDiagMain main;
1779 return main.Main(argc, argv);
1780}