/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>

#include <fstream>
#include <functional>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include <android-base/parseint.h>
#include "android-base/stringprintf.h"

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/os.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "image-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "oat.h"
#include "oat_file.h"
#include "oat_file_manager.h"
#include "scoped_thread_state_change-inl.h"

#include "backtrace/BacktraceMap.h"
#include "cmdline.h"

#include <signal.h>
#include <sys/stat.h>
#include <sys/types.h>

namespace art {

using android::base::StringPrintf;

namespace {

constexpr size_t kMaxAddressPrint = 5;

enum class ProcessType {
  kZygote,
  kRemote
};

enum class RemoteProcesses {
  kImageOnly,
  kZygoteOnly,
  kImageAndZygote
};

struct MappingData {
  // The count of pages that are considered dirty by the OS.
  size_t dirty_pages = 0;
  // The count of pages that differ by at least one byte.
  size_t different_pages = 0;
  // The count of differing bytes.
  size_t different_bytes = 0;
  // The count of differing four-byte units.
  size_t different_int32s = 0;
  // The count of pages that have mapping count == 1.
  size_t private_pages = 0;
  // The count of private pages that are also dirty.
  size_t private_dirty_pages = 0;
  // The count of pages that are marked dirty but do not differ.
  size_t false_dirty_pages = 0;
  // Set of the local virtual page indices that are dirty.
  std::set<size_t> dirty_page_set;
};

static std::string GetClassDescriptor(mirror::Class* klass)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  CHECK(klass != nullptr);

  std::string descriptor;
  const char* descriptor_str = klass->GetDescriptor(&descriptor /*out*/);

  return std::string(descriptor_str);
}

static std::string PrettyFieldValue(ArtField* field, mirror::Object* object)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  std::ostringstream oss;
  switch (field->GetTypeAsPrimitiveType()) {
    case Primitive::kPrimNot: {
      oss << object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
          field->GetOffset());
      break;
    }
    case Primitive::kPrimBoolean: {
      oss << static_cast<bool>(object->GetFieldBoolean<kVerifyNone>(field->GetOffset()));
      break;
    }
    case Primitive::kPrimByte: {
      oss << static_cast<int32_t>(object->GetFieldByte<kVerifyNone>(field->GetOffset()));
      break;
    }
    case Primitive::kPrimChar: {
      oss << object->GetFieldChar<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimShort: {
      oss << object->GetFieldShort<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimInt: {
      oss << object->GetField32<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimLong: {
      oss << object->GetField64<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimFloat: {
      oss << object->GetField32<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimDouble: {
      oss << object->GetField64<kVerifyNone>(field->GetOffset());
      break;
    }
    case Primitive::kPrimVoid: {
      oss << "void";
      break;
    }
  }
  return oss.str();
}

template <typename K, typename V, typename D>
static std::vector<std::pair<V, K>> SortByValueDesc(
    const std::map<K, D> map,
    std::function<V(const D&)> value_mapper = [](const D& d) { return static_cast<V>(d); }) {
  // Store value->key so that we can use the default sort from pair which
  // sorts by value first and then key
  std::vector<std::pair<V, K>> value_key_vector;

  for (const auto& kv_pair : map) {
    value_key_vector.push_back(std::make_pair(value_mapper(kv_pair.second), kv_pair.first));
  }

  // Sort in reverse (descending order)
  std::sort(value_key_vector.rbegin(), value_key_vector.rend());
  return value_key_vector;
}
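// Example use (mirrors DumpDirtyEntries() below): sort class_data_ by dirty object count into
// descending (count, Class*) pairs:
//   SortByValueDesc<mirror::Class*, size_t, ClassData>(
//       class_data_, [](const ClassData& d) { return d.dirty_object_count; });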

// Fixup a remote pointer that we read from a foreign boot.art to point to our own memory.
// Returned pointer will point to inside of remote_contents.
template <typename T>
static ObjPtr<T> FixUpRemotePointer(ObjPtr<T> remote_ptr,
                                    std::vector<uint8_t>& remote_contents,
                                    const backtrace_map_t& boot_map)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (remote_ptr == nullptr) {
    return nullptr;
  }

  uintptr_t remote = reinterpret_cast<uintptr_t>(remote_ptr.Ptr());

  // In the case the remote pointer is out of range, it probably belongs to another image.
  // Just return null for this case.
  if (remote < boot_map.start || remote >= boot_map.end) {
    return nullptr;
  }

  off_t boot_offset = remote - boot_map.start;

  return reinterpret_cast<T*>(&remote_contents[boot_offset]);
}

template <typename T>
static ObjPtr<T> RemoteContentsPointerToLocal(ObjPtr<T> remote_ptr,
                                              std::vector<uint8_t>& remote_contents,
                                              const ImageHeader& image_header)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (remote_ptr == nullptr) {
    return nullptr;
  }

  uint8_t* remote = reinterpret_cast<uint8_t*>(remote_ptr.Ptr());
  ptrdiff_t boot_offset = remote - &remote_contents[0];

  const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header) + boot_offset;

  return reinterpret_cast<T*>(const_cast<uint8_t*>(local_ptr));
}
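
// Note: FixUpRemotePointer() maps an address in the remote process' address space to the
// corresponding byte inside our local copy of its image (remote_contents), while
// RemoteContentsPointerToLocal() maps a pointer inside such a copy back onto the image that
// this process has mapped locally starting at image_header.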

template <typename T> size_t EntrySize(T* entry);
template<> size_t EntrySize(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
  return object->SizeOf();
}
template<> size_t EntrySize(ArtMethod* art_method) REQUIRES_SHARED(Locks::mutator_lock_) {
  return sizeof(*art_method);
}

template <typename T>
static bool EntriesDiffer(T* entry1, T* entry2) REQUIRES_SHARED(Locks::mutator_lock_) {
  return memcmp(entry1, entry2, EntrySize(entry1)) != 0;
}

template <typename T>
struct RegionCommon {
 public:
  RegionCommon(std::ostream* os,
               std::vector<uint8_t>* remote_contents,
               std::vector<uint8_t>* zygote_contents,
               const backtrace_map_t& boot_map,
               const ImageHeader& image_header) :
    os_(*os),
    remote_contents_(remote_contents),
    zygote_contents_(zygote_contents),
    boot_map_(boot_map),
    image_header_(image_header),
    different_entries_(0),
    dirty_entry_bytes_(0),
    false_dirty_entry_bytes_(0) {
    CHECK(remote_contents != nullptr);
    CHECK(zygote_contents != nullptr);
  }

  void DumpSamplesAndOffsetCount() {
    os_ << " sample object addresses: ";
    for (size_t i = 0; i < dirty_entries_.size() && i < kMaxAddressPrint; ++i) {
      T* entry = dirty_entries_[i];
      os_ << reinterpret_cast<void*>(entry) << ", ";
    }
    os_ << "\n";
    os_ << " dirty byte +offset:count list = ";
    std::vector<std::pair<size_t, off_t>> field_dirty_count_sorted =
        SortByValueDesc<off_t, size_t, size_t>(field_dirty_count_);
    for (const std::pair<size_t, off_t>& pair : field_dirty_count_sorted) {
      off_t offset = pair.second;
      size_t count = pair.first;
      os_ << "+" << offset << ":" << count << ", ";
    }
    os_ << "\n";
  }

  size_t GetDifferentEntryCount() const { return different_entries_; }
  size_t GetDirtyEntryBytes() const { return dirty_entry_bytes_; }
  size_t GetFalseDirtyEntryCount() const { return false_dirty_entries_.size(); }
  size_t GetFalseDirtyEntryBytes() const { return false_dirty_entry_bytes_; }
  size_t GetZygoteDirtyEntryCount() const { return zygote_dirty_entries_.size(); }

 protected:
  bool IsEntryOnDirtyPage(T* entry, const std::set<size_t>& dirty_pages) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t size = EntrySize(entry);
    size_t page_off = 0;
    size_t current_page_idx;
    uintptr_t entry_address = reinterpret_cast<uintptr_t>(entry);
    // Iterate every page this entry belongs to
    do {
      current_page_idx = entry_address / kPageSize + page_off;
      if (dirty_pages.find(current_page_idx) != dirty_pages.end()) {
        // This entry is on a dirty page
        return true;
      }
      page_off++;
    } while ((current_page_idx * kPageSize) < RoundUp(entry_address + size, kObjectAlignment));
    return false;
  }

  void AddZygoteDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    zygote_dirty_entries_.insert(entry);
  }

  void AddImageDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    image_dirty_entries_.insert(entry);
  }

  void AddFalseDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
    false_dirty_entries_.push_back(entry);
    false_dirty_entry_bytes_ += EntrySize(entry);
  }

  // The output stream to write to.
  std::ostream& os_;
  // The byte contents of the remote (image) process' image.
  std::vector<uint8_t>* remote_contents_;
  // The byte contents of the zygote process' image.
  std::vector<uint8_t>* zygote_contents_;
  const backtrace_map_t& boot_map_;
  const ImageHeader& image_header_;

  // Count of entries that are different.
  size_t different_entries_;

  // Local entries that are dirty (differ in at least one byte).
  size_t dirty_entry_bytes_;
  std::vector<T*> dirty_entries_;

  // Local entries that are clean, but located on dirty pages.
  size_t false_dirty_entry_bytes_;
  std::vector<T*> false_dirty_entries_;

  // Image dirty entries
  // If zygote_pid_only_ == true, these are shared dirty entries in the zygote.
  // If zygote_pid_only_ == false, these are private dirty entries in the application.
  std::set<T*> image_dirty_entries_;

  // Zygote dirty entries (probably private dirty).
  // We only add entries here if they differed in both the image and the zygote, so
  // they are probably private dirty.
  std::set<T*> zygote_dirty_entries_;

  std::map<off_t /* field offset */, size_t /* count */> field_dirty_count_;

 private:
  DISALLOW_COPY_AND_ASSIGN(RegionCommon);
};

template <typename T>
class RegionSpecializedBase : public RegionCommon<T> {
};

// Region analysis for mirror::Objects
class ImgObjectVisitor : public ObjectVisitor {
 public:
  using ComputeDirtyFunc = std::function<void(mirror::Object* object,
                                              const uint8_t* begin_image_ptr,
                                              const std::set<size_t>& dirty_pages)>;
  ImgObjectVisitor(ComputeDirtyFunc dirty_func,
                   const uint8_t* begin_image_ptr,
                   const std::set<size_t>& dirty_pages) :
    dirty_func_(std::move(dirty_func)),
    begin_image_ptr_(begin_image_ptr),
    dirty_pages_(dirty_pages) { }

  ~ImgObjectVisitor() override { }

  void Visit(mirror::Object* object) override REQUIRES_SHARED(Locks::mutator_lock_) {
    // Sanity check that we are reading a real mirror::Object
    CHECK(object->GetClass() != nullptr) << "Image object at address "
                                         << object
                                         << " has null class";
    if (kUseBakerReadBarrier) {
      object->AssertReadBarrierState();
    }
    dirty_func_(object, begin_image_ptr_, dirty_pages_);
  }

 private:
  const ComputeDirtyFunc dirty_func_;
  const uint8_t* begin_image_ptr_;
  const std::set<size_t>& dirty_pages_;
};

template<>
class RegionSpecializedBase<mirror::Object> : public RegionCommon<mirror::Object> {
 public:
  RegionSpecializedBase(std::ostream* os,
                        std::vector<uint8_t>* remote_contents,
                        std::vector<uint8_t>* zygote_contents,
                        const backtrace_map_t& boot_map,
                        const ImageHeader& image_header,
                        bool dump_dirty_objects)
      : RegionCommon<mirror::Object>(os, remote_contents, zygote_contents, boot_map, image_header),
        os_(*os),
        dump_dirty_objects_(dump_dirty_objects) { }

  // Define a common public type name for use by RegionData.
  using VisitorClass = ImgObjectVisitor;

  void VisitEntries(VisitorClass* visitor,
                    uint8_t* base,
                    PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    RegionCommon<mirror::Object>::image_header_.VisitObjects(visitor, base, pointer_size);
  }

  void VisitEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Unconditionally store the class descriptor in case we need it later
    mirror::Class* klass = entry->GetClass();
    class_data_[klass].descriptor = GetClassDescriptor(klass);
  }

  void AddCleanEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    class_data_[entry->GetClass()].AddCleanObject();
  }

  void AddFalseDirtyEntry(mirror::Object* entry)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    RegionCommon<mirror::Object>::AddFalseDirtyEntry(entry);
    class_data_[entry->GetClass()].AddFalseDirtyObject(entry);
  }

  void AddDirtyEntry(mirror::Object* entry, mirror::Object* entry_remote)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t entry_size = EntrySize(entry);
    ++different_entries_;
    dirty_entry_bytes_ += entry_size;
    // Log dirty count and objects for class objects only.
    mirror::Class* klass = entry->GetClass();
    if (klass->IsClassClass()) {
      // Increment counts for the fields that are dirty
      const uint8_t* current = reinterpret_cast<const uint8_t*>(entry);
      const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(entry_remote);
      for (size_t i = 0; i < entry_size; ++i) {
        if (current[i] != current_remote[i]) {
          field_dirty_count_[i]++;
        }
      }
      dirty_entries_.push_back(entry);
    }
    class_data_[klass].AddDirtyObject(entry, entry_remote);
  }

  void DiffEntryContents(mirror::Object* entry,
                         uint8_t* remote_bytes,
                         const uint8_t* base_ptr,
                         bool log_dirty_objects)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* tabs = " ";
    // Attempt to find fields for all dirty bytes.
    mirror::Class* klass = entry->GetClass();
    if (entry->IsClass()) {
      os_ << tabs
          << "Class " << mirror::Class::PrettyClass(entry->AsClass()) << " " << entry << "\n";
    } else {
      os_ << tabs
          << "Instance of " << mirror::Class::PrettyClass(klass) << " " << entry << "\n";
    }

    std::unordered_set<ArtField*> dirty_instance_fields;
    std::unordered_set<ArtField*> dirty_static_fields;
    // Examine the bytes comprising the Object, computing which fields are dirty
    // and recording them for later display. If the Object is an array object,
    // compute the dirty entries.
    mirror::Object* remote_entry = reinterpret_cast<mirror::Object*>(remote_bytes);
    for (size_t i = 0, count = entry->SizeOf(); i < count; ++i) {
      if (base_ptr[i] != remote_bytes[i]) {
        ArtField* field = ArtField::FindInstanceFieldWithOffset</*exact*/false>(klass, i);
        if (field != nullptr) {
          dirty_instance_fields.insert(field);
        } else if (entry->IsClass()) {
          field = ArtField::FindStaticFieldWithOffset</*exact*/false>(entry->AsClass(), i);
          if (field != nullptr) {
            dirty_static_fields.insert(field);
          }
        }
        if (field == nullptr) {
          if (klass->IsArrayClass()) {
            mirror::Class* component_type = klass->GetComponentType();
            Primitive::Type primitive_type = component_type->GetPrimitiveType();
            size_t component_size = Primitive::ComponentSize(primitive_type);
            size_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
            if (i >= data_offset) {
              os_ << tabs << "Dirty array element " << (i - data_offset) / component_size << "\n";
              // Skip to next element to prevent spam.
              i += component_size - 1;
              continue;
            }
          }
          os_ << tabs << "No field for byte offset " << i << "\n";
        }
      }
    }
    // Dump different fields.
    if (!dirty_instance_fields.empty()) {
      os_ << tabs << "Dirty instance fields " << dirty_instance_fields.size() << "\n";
      for (ArtField* field : dirty_instance_fields) {
        os_ << tabs << ArtField::PrettyField(field)
            << " original=" << PrettyFieldValue(field, entry)
            << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
      }
    }
    if (!dirty_static_fields.empty()) {
      if (dump_dirty_objects_ && log_dirty_objects) {
        dirty_objects_.insert(entry);
      }
      os_ << tabs << "Dirty static fields " << dirty_static_fields.size() << "\n";
      for (ArtField* field : dirty_static_fields) {
        os_ << tabs << ArtField::PrettyField(field)
            << " original=" << PrettyFieldValue(field, entry)
            << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
      }
    }
    os_ << "\n";
  }

  void DumpDirtyObjects() REQUIRES_SHARED(Locks::mutator_lock_) {
    for (mirror::Object* obj : dirty_objects_) {
      if (obj->IsClass()) {
        os_ << "Private dirty object: " << obj->AsClass()->PrettyDescriptor() << "\n";
      }
    }
  }

  void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto dirty_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.dirty_object_count; });
    os_ << "\n" << " Dirty object count by class:\n";
    for (const auto& vk_pair : dirty_object_class_values) {
      size_t dirty_object_count = vk_pair.first;
      mirror::Class* klass = vk_pair.second;
      ClassData& class_data = class_data_[klass];
      size_t object_sizes = class_data.dirty_object_size_in_bytes;
      float avg_dirty_bytes_per_class =
          class_data.dirty_object_byte_count * 1.0f / object_sizes;
      float avg_object_size = object_sizes * 1.0f / dirty_object_count;
      const std::string& descriptor = class_data.descriptor;
      os_ << " " << mirror::Class::PrettyClass(klass) << " ("
          << "objects: " << dirty_object_count << ", "
          << "avg dirty bytes: " << avg_dirty_bytes_per_class << ", "
          << "avg object size: " << avg_object_size << ", "
          << "class descriptor: '" << descriptor << "'"
          << ")\n";
      if (strcmp(descriptor.c_str(), "Ljava/lang/Class;") == 0) {
        DumpSamplesAndOffsetCount();
        os_ << " field contents:\n";
        for (mirror::Object* object : class_data.dirty_objects) {
          // remote class object
          ObjPtr<mirror::Class> remote_klass =
              ObjPtr<mirror::Class>::DownCast<mirror::Object>(object);
          // local class object
          ObjPtr<mirror::Class> local_klass =
              RemoteContentsPointerToLocal(remote_klass,
                                           *RegionCommon<mirror::Object>::remote_contents_,
                                           RegionCommon<mirror::Object>::image_header_);
          os_ << " " << reinterpret_cast<const void*>(object) << " ";
          os_ << " class_status (remote): " << remote_klass->GetStatus() << ", ";
          os_ << " class_status (local): " << local_klass->GetStatus();
          os_ << "\n";
        }
      }
    }
  }

  void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto false_dirty_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.false_dirty_object_count; });
    os_ << "\n" << " False-dirty object count by class:\n";
    for (const auto& vk_pair : false_dirty_object_class_values) {
      size_t object_count = vk_pair.first;
      mirror::Class* klass = vk_pair.second;
      ClassData& class_data = class_data_[klass];
      size_t object_sizes = class_data.false_dirty_byte_count;
      float avg_object_size = object_sizes * 1.0f / object_count;
      const std::string& descriptor = class_data.descriptor;
      os_ << " " << mirror::Class::PrettyClass(klass) << " ("
          << "objects: " << object_count << ", "
          << "avg object size: " << avg_object_size << ", "
          << "total bytes: " << object_sizes << ", "
          << "class descriptor: '" << descriptor << "'"
          << ")\n";
    }
  }

  void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    // vector of pairs (size_t count, Class*)
    auto clean_object_class_values =
        SortByValueDesc<mirror::Class*, size_t, ClassData>(
            class_data_,
            [](const ClassData& d) { return d.clean_object_count; });
    os_ << "\n" << " Clean object count by class:\n";
    for (const auto& vk_pair : clean_object_class_values) {
      os_ << " " << mirror::Class::PrettyClass(vk_pair.second) << " (" << vk_pair.first << ")\n";
    }
  }

 private:
  // Aggregate and detail class data from an image diff.
  struct ClassData {
    size_t dirty_object_count = 0;
    // Track only the byte-per-byte dirtiness (in bytes)
    size_t dirty_object_byte_count = 0;
    // Track the object-by-object dirtiness (in bytes)
    size_t dirty_object_size_in_bytes = 0;
    size_t clean_object_count = 0;
    std::string descriptor;
    size_t false_dirty_byte_count = 0;
    size_t false_dirty_object_count = 0;
    std::vector<mirror::Object*> false_dirty_objects;
    // Remote pointers to dirty objects
    std::vector<mirror::Object*> dirty_objects;

    void AddCleanObject() REQUIRES_SHARED(Locks::mutator_lock_) {
      ++clean_object_count;
    }

    void AddDirtyObject(mirror::Object* object, mirror::Object* object_remote)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      ++dirty_object_count;
      dirty_object_byte_count += CountDirtyBytes(object, object_remote);
      dirty_object_size_in_bytes += EntrySize(object);
      dirty_objects.push_back(object_remote);
    }

    void AddFalseDirtyObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
      ++false_dirty_object_count;
      false_dirty_objects.push_back(object);
      false_dirty_byte_count += EntrySize(object);
    }

   private:
    // Go byte-by-byte and figure out what exactly got dirtied
    static size_t CountDirtyBytes(mirror::Object* object1, mirror::Object* object2)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      const uint8_t* cur1 = reinterpret_cast<const uint8_t*>(object1);
      const uint8_t* cur2 = reinterpret_cast<const uint8_t*>(object2);
      size_t dirty_bytes = 0;
      size_t object_size = EntrySize(object1);
      for (size_t i = 0; i < object_size; ++i) {
        if (cur1[i] != cur2[i]) {
          dirty_bytes++;
        }
      }
      return dirty_bytes;
    }
  };

  std::ostream& os_;
  bool dump_dirty_objects_;
  std::unordered_set<mirror::Object*> dirty_objects_;
  std::map<mirror::Class*, ClassData> class_data_;

  DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
};

// Region analysis for ArtMethods.
class ImgArtMethodVisitor : public ArtMethodVisitor {
 public:
  using ComputeDirtyFunc = std::function<void(ArtMethod*,
                                              const uint8_t*,
                                              const std::set<size_t>&)>;
  ImgArtMethodVisitor(ComputeDirtyFunc dirty_func,
                      const uint8_t* begin_image_ptr,
                      const std::set<size_t>& dirty_pages) :
    dirty_func_(std::move(dirty_func)),
    begin_image_ptr_(begin_image_ptr),
    dirty_pages_(dirty_pages) { }
  ~ImgArtMethodVisitor() override { }
  void Visit(ArtMethod* method) override {
    dirty_func_(method, begin_image_ptr_, dirty_pages_);
  }

 private:
  const ComputeDirtyFunc dirty_func_;
  const uint8_t* begin_image_ptr_;
  const std::set<size_t>& dirty_pages_;
};

// Struct and functor for computing offsets of members of ArtMethods.
// template <typename RegionType>
struct MemberInfo {
  template <typename T>
  void operator() (const ArtMethod* method, const T* member_address, const std::string& name) {
    // Check that member_address is a pointer inside *method.
    DCHECK(reinterpret_cast<uintptr_t>(method) <= reinterpret_cast<uintptr_t>(member_address));
    DCHECK(reinterpret_cast<uintptr_t>(member_address) + sizeof(T) <=
           reinterpret_cast<uintptr_t>(method) + sizeof(ArtMethod));
    size_t offset =
        reinterpret_cast<uintptr_t>(member_address) - reinterpret_cast<uintptr_t>(method);
    offset_to_name_size_.insert({offset, NameAndSize(sizeof(T), name)});
  }

  struct NameAndSize {
    size_t size_;
    std::string name_;
    NameAndSize(size_t size, const std::string& name) : size_(size), name_(name) { }
    NameAndSize() : size_(0), name_("INVALID") { }
  };

  std::map<size_t, NameAndSize> offset_to_name_size_;
};

template<>
class RegionSpecializedBase<ArtMethod> : public RegionCommon<ArtMethod> {
 public:
  RegionSpecializedBase(std::ostream* os,
                        std::vector<uint8_t>* remote_contents,
                        std::vector<uint8_t>* zygote_contents,
                        const backtrace_map_t& boot_map,
                        const ImageHeader& image_header,
                        bool dump_dirty_objects ATTRIBUTE_UNUSED)
      : RegionCommon<ArtMethod>(os, remote_contents, zygote_contents, boot_map, image_header),
        os_(*os) {
    // Prepare the table for offset to member lookups.
    ArtMethod* art_method = reinterpret_cast<ArtMethod*>(&(*remote_contents)[0]);
    art_method->VisitMembers(member_info_);
    // Prepare the table for address to symbolic entry point names.
    BuildEntryPointNames();
    class_linker_ = Runtime::Current()->GetClassLinker();
  }

  // Define a common public type name for use by RegionData.
  using VisitorClass = ImgArtMethodVisitor;

  void VisitEntries(VisitorClass* visitor,
                    uint8_t* base,
                    PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    RegionCommon<ArtMethod>::image_header_.VisitPackedArtMethods(visitor, base, pointer_size);
  }

  void VisitEntry(ArtMethod* method ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
  }

  void AddCleanEntry(ArtMethod* method ATTRIBUTE_UNUSED) {
  }

  void AddFalseDirtyEntry(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    RegionCommon<ArtMethod>::AddFalseDirtyEntry(method);
  }

  void AddDirtyEntry(ArtMethod* method, ArtMethod* method_remote)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t entry_size = EntrySize(method);
    ++different_entries_;
    dirty_entry_bytes_ += entry_size;
    // Increment counts for the fields that are dirty
    const uint8_t* current = reinterpret_cast<const uint8_t*>(method);
    const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(method_remote);
    // ArtMethods always log their dirty count and entries.
    for (size_t i = 0; i < entry_size; ++i) {
      if (current[i] != current_remote[i]) {
        field_dirty_count_[i]++;
      }
    }
    dirty_entries_.push_back(method);
  }

  void DiffEntryContents(ArtMethod* method,
                         uint8_t* remote_bytes,
                         const uint8_t* base_ptr,
                         bool log_dirty_objects ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    const char* tabs = " ";
    os_ << tabs << "ArtMethod " << ArtMethod::PrettyMethod(method) << "\n";

    std::unordered_set<size_t> dirty_members;
    // Examine the members comprising the ArtMethod, computing which members are dirty.
    for (const std::pair<const size_t,
                         MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
      const size_t offset = p.first;
      if (memcmp(base_ptr + offset, remote_bytes + offset, p.second.size_) != 0) {
        dirty_members.insert(p.first);
      }
    }
    // Dump different fields.
    if (!dirty_members.empty()) {
      os_ << tabs << "Dirty members " << dirty_members.size() << "\n";
      for (size_t offset : dirty_members) {
        const MemberInfo::NameAndSize& member_info = member_info_.offset_to_name_size_[offset];
        os_ << tabs << member_info.name_
            << " original=" << StringFromBytes(base_ptr + offset, member_info.size_)
            << " remote=" << StringFromBytes(remote_bytes + offset, member_info.size_)
            << "\n";
      }
    }
    os_ << "\n";
  }

  void DumpDirtyObjects() REQUIRES_SHARED(Locks::mutator_lock_) {
  }

  void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    DumpSamplesAndOffsetCount();
    os_ << " offset to field map:\n";
    for (const std::pair<const size_t,
                         MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
      const size_t offset = p.first;
      const size_t size = p.second.size_;
      os_ << StringPrintf(" %zu-%zu: ", offset, offset + size - 1)
          << p.second.name_
          << std::endl;
    }

    os_ << " field contents:\n";
    for (ArtMethod* method : dirty_entries_) {
      // remote method
      auto art_method = reinterpret_cast<ArtMethod*>(method);
      // remote class
      ObjPtr<mirror::Class> remote_declaring_class =
          FixUpRemotePointer(art_method->GetDeclaringClass(),
                             *RegionCommon<ArtMethod>::remote_contents_,
                             RegionCommon<ArtMethod>::boot_map_);
      // local class
      ObjPtr<mirror::Class> declaring_class =
          RemoteContentsPointerToLocal(remote_declaring_class,
                                       *RegionCommon<ArtMethod>::remote_contents_,
                                       RegionCommon<ArtMethod>::image_header_);
      DumpOneArtMethod(art_method, declaring_class, remote_declaring_class);
    }
  }

  void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
    os_ << "\n" << " False-dirty ArtMethods\n";
    os_ << " field contents:\n";
    for (ArtMethod* method : false_dirty_entries_) {
      // local class
      ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
      DumpOneArtMethod(method, declaring_class, nullptr);
    }
  }

  void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
  }

 private:
  std::ostream& os_;
  MemberInfo member_info_;
  std::map<const void*, std::string> entry_point_names_;
  ClassLinker* class_linker_;

  // Compute a map of addresses to names in the boot OAT file(s).
  void BuildEntryPointNames() {
    OatFileManager& oat_file_manager = Runtime::Current()->GetOatFileManager();
    std::vector<const OatFile*> boot_oat_files = oat_file_manager.GetBootOatFiles();
    for (const OatFile* oat_file : boot_oat_files) {
      const OatHeader& oat_header = oat_file->GetOatHeader();
      const void* i2ib = oat_header.GetInterpreterToInterpreterBridge();
      if (i2ib != nullptr) {
        entry_point_names_[i2ib] = "InterpreterToInterpreterBridge (from boot oat file)";
      }
      const void* i2ccb = oat_header.GetInterpreterToCompiledCodeBridge();
      if (i2ccb != nullptr) {
        entry_point_names_[i2ccb] = "InterpreterToCompiledCodeBridge (from boot oat file)";
      }
      const void* jdl = oat_header.GetJniDlsymLookup();
      if (jdl != nullptr) {
        entry_point_names_[jdl] = "JniDlsymLookup (from boot oat file)";
      }
      const void* qgjt = oat_header.GetQuickGenericJniTrampoline();
      if (qgjt != nullptr) {
        entry_point_names_[qgjt] = "QuickGenericJniTrampoline (from boot oat file)";
      }
      const void* qrt = oat_header.GetQuickResolutionTrampoline();
      if (qrt != nullptr) {
        entry_point_names_[qrt] = "QuickResolutionTrampoline (from boot oat file)";
      }
      const void* qict = oat_header.GetQuickImtConflictTrampoline();
      if (qict != nullptr) {
        entry_point_names_[qict] = "QuickImtConflictTrampoline (from boot oat file)";
      }
      const void* q2ib = oat_header.GetQuickToInterpreterBridge();
      if (q2ib != nullptr) {
        entry_point_names_[q2ib] = "QuickToInterpreterBridge (from boot oat file)";
      }
    }
  }

  std::string StringFromBytes(const uint8_t* bytes, size_t size) {
    switch (size) {
      case 1:
        return StringPrintf("%" PRIx8, *bytes);
      case 2:
        return StringPrintf("%" PRIx16, *reinterpret_cast<const uint16_t*>(bytes));
      case 4:
      case 8: {
        // Compute an address if the bytes might contain one.
        uint64_t intval;
        if (size == 4) {
          intval = *reinterpret_cast<const uint32_t*>(bytes);
        } else {
          intval = *reinterpret_cast<const uint64_t*>(bytes);
        }
        const void* addr = reinterpret_cast<const void*>(intval);
        // Match the address against those that have Is* methods in the ClassLinker.
        if (class_linker_->IsQuickToInterpreterBridge(addr)) {
          return "QuickToInterpreterBridge";
        } else if (class_linker_->IsQuickGenericJniStub(addr)) {
          return "QuickGenericJniStub";
        } else if (class_linker_->IsQuickResolutionStub(addr)) {
          return "QuickResolutionStub";
        } else if (class_linker_->IsJniDlsymLookupStub(addr)) {
          return "JniDlsymLookupStub";
        }
        // Match the address against those that we saved from the boot OAT files.
        if (entry_point_names_.find(addr) != entry_point_names_.end()) {
          return entry_point_names_[addr];
        }
        return StringPrintf("%" PRIx64, intval);
      }
      default:
        LOG(WARNING) << "Don't know how to convert " << size << " bytes to integer";
        return "<UNKNOWN>";
    }
  }

  void DumpOneArtMethod(ArtMethod* art_method,
                        ObjPtr<mirror::Class> declaring_class,
                        ObjPtr<mirror::Class> remote_declaring_class)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
    os_ << " " << reinterpret_cast<const void*>(art_method) << " ";
    os_ << " entryPointFromJni: "
        << reinterpret_cast<const void*>(art_method->GetDataPtrSize(pointer_size)) << ", ";
    os_ << " entryPointFromQuickCompiledCode: "
        << reinterpret_cast<const void*>(
               art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size))
        << ", ";
    os_ << " isNative? " << (art_method->IsNative() ? "yes" : "no") << ", ";
    // Null for runtime methods.
    if (declaring_class != nullptr) {
      os_ << " class_status (local): " << declaring_class->GetStatus();
    }
    if (remote_declaring_class != nullptr) {
      os_ << ", class_status (remote): " << remote_declaring_class->GetStatus();
    }
    os_ << "\n";
  }

  DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
};

template <typename T>
class RegionData : public RegionSpecializedBase<T> {
 public:
  RegionData(std::ostream* os,
             std::vector<uint8_t>* remote_contents,
             std::vector<uint8_t>* zygote_contents,
             const backtrace_map_t& boot_map,
             const ImageHeader& image_header,
             bool dump_dirty_objects)
      : RegionSpecializedBase<T>(os,
                                 remote_contents,
                                 zygote_contents,
                                 boot_map,
                                 image_header,
                                 dump_dirty_objects),
        os_(*os) {
    CHECK(remote_contents != nullptr);
    CHECK(zygote_contents != nullptr);
  }

  // Walk over the type T entries in the region between begin_image_ptr and end_image_ptr,
  // collecting and reporting data regarding dirty, difference, etc.
  void ProcessRegion(const MappingData& mapping_data,
                     RemoteProcesses remotes,
                     const uint8_t* begin_image_ptr)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    typename RegionSpecializedBase<T>::VisitorClass visitor(
        [this](T* entry,
               const uint8_t* begin_image_ptr,
               const std::set<size_t>& dirty_page_set) REQUIRES_SHARED(Locks::mutator_lock_) {
          this->ComputeEntryDirty(entry, begin_image_ptr, dirty_page_set);
        },
        begin_image_ptr,
        mapping_data.dirty_page_set);
    PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
    RegionSpecializedBase<T>::VisitEntries(&visitor,
                                           const_cast<uint8_t*>(begin_image_ptr),
                                           pointer_size);

    // Looking at only dirty pages, figure out how many of those bytes belong to dirty entries.
    // TODO: fix this now that there are multiple regions in a mapping.
    float true_dirtied_percent =
        RegionCommon<T>::GetDirtyEntryBytes() * 1.0f / (mapping_data.dirty_pages * kPageSize);

    // Entry specific statistics.
    os_ << RegionCommon<T>::GetDifferentEntryCount() << " different entries, \n "
        << RegionCommon<T>::GetDirtyEntryBytes() << " different entry [bytes], \n "
        << RegionCommon<T>::GetFalseDirtyEntryCount() << " false dirty entries,\n "
        << RegionCommon<T>::GetFalseDirtyEntryBytes() << " false dirty entry [bytes], \n "
        << true_dirtied_percent << " different entries-vs-total in a dirty page;\n "
        << "\n";

    const uint8_t* base_ptr = begin_image_ptr;
    switch (remotes) {
      case RemoteProcesses::kZygoteOnly:
        os_ << " Zygote shared dirty entries: ";
        break;
      case RemoteProcesses::kImageAndZygote:
        os_ << " Application dirty entries (private dirty): ";
        // If we are dumping private dirty, diff against the zygote map to make it clearer what
        // fields caused the page to be private dirty.
        base_ptr = &RegionCommon<T>::zygote_contents_->operator[](0);
        break;
      case RemoteProcesses::kImageOnly:
        os_ << " Application dirty entries (unknown whether private or shared dirty): ";
        break;
    }
    DiffDirtyEntries(ProcessType::kRemote,
                     begin_image_ptr,
                     RegionCommon<T>::remote_contents_,
                     base_ptr,
                     /*log_dirty_objects=*/true);
    // Print shared dirty after since it's less important.
    if (RegionCommon<T>::GetZygoteDirtyEntryCount() != 0) {
      // We only reach this point if both pids were specified. Furthermore,
      // entries are only displayed here if they differed in both the image
      // and the zygote, so they are probably private dirty.
      CHECK(remotes == RemoteProcesses::kImageAndZygote);
      os_ << "\n" << " Zygote dirty entries (probably shared dirty): ";
      DiffDirtyEntries(ProcessType::kZygote,
                       begin_image_ptr,
                       RegionCommon<T>::zygote_contents_,
                       begin_image_ptr,
                       /*log_dirty_objects=*/false);
    }
    RegionSpecializedBase<T>::DumpDirtyObjects();
    RegionSpecializedBase<T>::DumpDirtyEntries();
    RegionSpecializedBase<T>::DumpFalseDirtyEntries();
    RegionSpecializedBase<T>::DumpCleanEntries();
  }

 private:
  std::ostream& os_;

  void DiffDirtyEntries(ProcessType process_type,
                        const uint8_t* begin_image_ptr,
                        std::vector<uint8_t>* contents,
                        const uint8_t* base_ptr,
                        bool log_dirty_objects)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    os_ << RegionCommon<T>::dirty_entries_.size() << "\n";
    const std::set<T*>& entries =
        (process_type == ProcessType::kZygote) ?
            RegionCommon<T>::zygote_dirty_entries_ :
            RegionCommon<T>::image_dirty_entries_;
    for (T* entry : entries) {
      uint8_t* entry_bytes = reinterpret_cast<uint8_t*>(entry);
      ptrdiff_t offset = entry_bytes - begin_image_ptr;
      uint8_t* remote_bytes = &(*contents)[offset];
      RegionSpecializedBase<T>::DiffEntryContents(entry,
                                                  remote_bytes,
                                                  &base_ptr[offset],
                                                  log_dirty_objects);
    }
  }

  void ComputeEntryDirty(T* entry,
                         const uint8_t* begin_image_ptr,
                         const std::set<size_t>& dirty_pages)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Set up pointers in the remote and the zygote for comparison.
    uint8_t* current = reinterpret_cast<uint8_t*>(entry);
    ptrdiff_t offset = current - begin_image_ptr;
    T* entry_remote =
        reinterpret_cast<T*>(const_cast<uint8_t*>(&(*RegionCommon<T>::remote_contents_)[offset]));
    const bool have_zygote = !RegionCommon<T>::zygote_contents_->empty();
    const uint8_t* current_zygote =
        have_zygote ? &(*RegionCommon<T>::zygote_contents_)[offset] : nullptr;
    T* entry_zygote = reinterpret_cast<T*>(const_cast<uint8_t*>(current_zygote));
    // Visit and classify entries at the current location.
    RegionSpecializedBase<T>::VisitEntry(entry);

    // Test private dirty first.
    bool is_dirty = false;
    if (have_zygote) {
      bool private_dirty = EntriesDiffer(entry_zygote, entry_remote);
      if (private_dirty) {
        // Private dirty, app vs zygote.
        is_dirty = true;
        RegionCommon<T>::AddImageDirtyEntry(entry);
      }
      if (EntriesDiffer(entry_zygote, entry)) {
        // Shared dirty, zygote vs image.
        is_dirty = true;
        RegionCommon<T>::AddZygoteDirtyEntry(entry);
      }
    } else if (EntriesDiffer(entry_remote, entry)) {
      // Shared or private dirty, app vs image.
      is_dirty = true;
      RegionCommon<T>::AddImageDirtyEntry(entry);
    }
    if (is_dirty) {
      // TODO: Add support for dirty entries in zygote and image.
      RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
    } else {
      RegionSpecializedBase<T>::AddCleanEntry(entry);
      if (RegionCommon<T>::IsEntryOnDirtyPage(entry, dirty_pages)) {
        // This entry was either never mutated or got mutated back to the same value.
        // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
        RegionSpecializedBase<T>::AddFalseDirtyEntry(entry);
      }
    }
  }

  DISALLOW_COPY_AND_ASSIGN(RegionData);
};

}  // namespace


class ImgDiagDumper {
 public:
  explicit ImgDiagDumper(std::ostream* os,
                         const ImageHeader& image_header,
                         const std::string& image_location,
                         pid_t image_diff_pid,
                         pid_t zygote_diff_pid,
                         bool dump_dirty_objects)
      : os_(os),
        image_header_(image_header),
        image_location_(image_location),
        image_diff_pid_(image_diff_pid),
        zygote_diff_pid_(zygote_diff_pid),
        dump_dirty_objects_(dump_dirty_objects),
        zygote_pid_only_(false) {}

  bool Init() {
    std::ostream& os = *os_;

    if (image_diff_pid_ < 0 && zygote_diff_pid_ < 0) {
      os << "Either --image-diff-pid or --zygote-diff-pid (or both) must be specified.\n";
      return false;
    }

    // To avoid the combinations of command-line argument use cases:
    // If the user invoked with only --zygote-diff-pid, shuffle that to
    // image_diff_pid_, invalidate zygote_diff_pid_, and remember that
    // image_diff_pid_ is now special.
    if (image_diff_pid_ < 0) {
      image_diff_pid_ = zygote_diff_pid_;
      zygote_diff_pid_ = -1;
      zygote_pid_only_ = true;
    }

    {
      struct stat sts;
      std::string proc_pid_str =
          StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
      if (stat(proc_pid_str.c_str(), &sts) == -1) {
        os << "Process does not exist";
        return false;
      }
    }

    // Open /proc/$pid/maps to view memory maps
    auto tmp_proc_maps = std::unique_ptr<BacktraceMap>(BacktraceMap::Create(image_diff_pid_));
    if (tmp_proc_maps == nullptr) {
      os << "Could not read backtrace maps";
      return false;
    }

    bool found_boot_map = false;
    // Find the memory map only for boot.art
    for (const backtrace_map_t* map : *tmp_proc_maps) {
      if (EndsWith(map->name, GetImageLocationBaseName())) {
        if ((map->flags & PROT_WRITE) != 0) {
          boot_map_ = *map;
          found_boot_map = true;
          break;
        }
        // In actuality there's more than 1 map, but the second one is read-only.
        // The one we care about is the write-able map.
        // The readonly maps are guaranteed to be identical, so its not interesting to compare
        // them.
      }
    }

    if (!found_boot_map) {
      os << "Could not find map for " << GetImageLocationBaseName();
      return false;
    }
    // Sanity check boot_map_.
    CHECK(boot_map_.end >= boot_map_.start);
    boot_map_size_ = boot_map_.end - boot_map_.start;

    // Open /proc/<image_diff_pid_>/mem and read as remote_contents_.
    std::string image_file_name =
        StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
    auto image_map_file = std::unique_ptr<File>(OS::OpenFileForReading(image_file_name.c_str()));
    if (image_map_file == nullptr) {
      os << "Failed to open " << image_file_name << " for reading";
      return false;
    }
    std::vector<uint8_t> tmp_remote_contents(boot_map_size_);
    if (!image_map_file->PreadFully(&tmp_remote_contents[0], boot_map_size_, boot_map_.start)) {
      os << "Could not fully read file " << image_file_name;
      return false;
    }

    // If zygote_diff_pid_ != -1, open /proc/<zygote_diff_pid_>/mem and read as zygote_contents_.
    std::vector<uint8_t> tmp_zygote_contents;
    if (zygote_diff_pid_ != -1) {
      std::string zygote_file_name =
          StringPrintf("/proc/%ld/mem", static_cast<long>(zygote_diff_pid_));  // NOLINT [runtime/int]
      std::unique_ptr<File> zygote_map_file(OS::OpenFileForReading(zygote_file_name.c_str()));
      if (zygote_map_file == nullptr) {
        os << "Failed to open " << zygote_file_name << " for reading";
        return false;
      }
      // The boot map should be at the same address.
      tmp_zygote_contents.resize(boot_map_size_);
      if (!zygote_map_file->PreadFully(&tmp_zygote_contents[0], boot_map_size_, boot_map_.start)) {
        LOG(WARNING) << "Could not fully read zygote file " << zygote_file_name;
        return false;
      }
    }

    // Open /proc/<image_diff_pid_>/pagemap.
    std::string pagemap_file_name = StringPrintf(
        "/proc/%ld/pagemap", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
    auto tmp_pagemap_file =
        std::unique_ptr<File>(OS::OpenFileForReading(pagemap_file_name.c_str()));
    if (tmp_pagemap_file == nullptr) {
      os << "Failed to open " << pagemap_file_name << " for reading: " << strerror(errno);
      return false;
    }

    // Not truly clean, mmap-ing boot.art again would be more pristine, but close enough
    const char* clean_pagemap_file_name = "/proc/self/pagemap";
    auto tmp_clean_pagemap_file = std::unique_ptr<File>(
        OS::OpenFileForReading(clean_pagemap_file_name));
    if (tmp_clean_pagemap_file == nullptr) {
      os << "Failed to open " << clean_pagemap_file_name << " for reading: " << strerror(errno);
      return false;
    }

    auto tmp_kpageflags_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpageflags"));
    if (tmp_kpageflags_file == nullptr) {
      os << "Failed to open /proc/kpageflags for reading: " << strerror(errno);
      return false;
    }

    auto tmp_kpagecount_file = std::unique_ptr<File>(OS::OpenFileForReading("/proc/kpagecount"));
    if (tmp_kpagecount_file == nullptr) {
      os << "Failed to open /proc/kpagecount for reading: " << strerror(errno);
      return false;
    }
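
    // At this point four kernel interfaces are open:
    //   tmp_pagemap_file       - /proc/<image_diff_pid_>/pagemap: per-virtual-page info
    //                            (including the physical frame number) for the remote process.
    //   tmp_clean_pagemap_file - /proc/self/pagemap: the same data for our own, presumed-clean,
    //                            mapping of the image.
    //   tmp_kpageflags_file    - /proc/kpageflags: per-physical-page flags.
    //   tmp_kpagecount_file    - /proc/kpagecount: how many times each physical page is mapped.
    // ComputeDirtyBytes() combines these to decide which remote pages are dirty and private.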

    // Commit the mappings, etc.
    proc_maps_ = std::move(tmp_proc_maps);
    remote_contents_ = std::move(tmp_remote_contents);
    zygote_contents_ = std::move(tmp_zygote_contents);
    pagemap_file_ = std::move(*tmp_pagemap_file.release());
    clean_pagemap_file_ = std::move(*tmp_clean_pagemap_file.release());
    kpageflags_file_ = std::move(*tmp_kpageflags_file.release());
    kpagecount_file_ = std::move(*tmp_kpagecount_file.release());

    return true;
  }

  bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
    std::ostream& os = *os_;
    os << "IMAGE LOCATION: " << image_location_ << "\n\n";

    os << "MAGIC: " << image_header_.GetMagic() << "\n\n";

    os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n";

    PrintPidLine("IMAGE", image_diff_pid_);
    os << "\n\n";
    PrintPidLine("ZYGOTE", zygote_diff_pid_);
    bool ret = true;
    if (image_diff_pid_ >= 0 || zygote_diff_pid_ >= 0) {
      ret = DumpImageDiff();
      os << "\n\n";
    }

    os << std::flush;

    return ret;
  }

 private:
  bool DumpImageDiff()
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return DumpImageDiffMap();
  }

David Sehrb4005f02017-06-20 19:11:40 -07001287 bool ComputeDirtyBytes(const uint8_t* image_begin, MappingData* mapping_data /*out*/) {
David Sehr50005a02017-06-21 13:24:21 -07001288 std::ostream& os = *os_;
1289
1290 size_t virtual_page_idx = 0; // Virtual page number (for an absolute memory address)
1291 size_t page_idx = 0; // Page index relative to 0
1292 size_t previous_page_idx = 0; // Previous page index relative to 0
1293
1294
1295 // Iterate through one page at a time. Boot map begin/end already implicitly aligned.
1296 for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; begin += kPageSize) {
1297 ptrdiff_t offset = begin - boot_map_.start;
1298
1299       // We treat the image header as part of the memory map for now.
1300       // If we wanted to change this, we could pass base = start + sizeof(ImageHeader),
1301       // but it might still be interesting to see whether any of the ImageHeader data mutated.
1302 const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
1303 uint8_t* remote_ptr = &remote_contents_[offset];
1304
1305 if (memcmp(local_ptr, remote_ptr, kPageSize) != 0) {
David Sehrb4005f02017-06-20 19:11:40 -07001306 mapping_data->different_pages++;
David Sehr50005a02017-06-21 13:24:21 -07001307
1308 // Count the number of 32-bit integers that are different.
1309 for (size_t i = 0; i < kPageSize / sizeof(uint32_t); ++i) {
1310 uint32_t* remote_ptr_int32 = reinterpret_cast<uint32_t*>(remote_ptr);
1311 const uint32_t* local_ptr_int32 = reinterpret_cast<const uint32_t*>(local_ptr);
1312
1313 if (remote_ptr_int32[i] != local_ptr_int32[i]) {
David Sehrb4005f02017-06-20 19:11:40 -07001314 mapping_data->different_int32s++;
David Sehr50005a02017-06-21 13:24:21 -07001315 }
1316 }
1317 }
1318 }
1319
Mathieu Chartier728f8502017-07-28 17:35:30 -07001320 std::vector<size_t> private_dirty_pages_for_section(ImageHeader::kSectionCount, 0u);
1321
David Sehr50005a02017-06-21 13:24:21 -07001322 // Iterate through one byte at a time.
1323 ptrdiff_t page_off_begin = image_header_.GetImageBegin() - image_begin;
1324 for (uintptr_t begin = boot_map_.start; begin != boot_map_.end; ++begin) {
1325 previous_page_idx = page_idx;
1326 ptrdiff_t offset = begin - boot_map_.start;
1327
1328       // We treat the image header as part of the memory map for now.
1329       // If we wanted to change this, we could pass base = start + sizeof(ImageHeader),
1330       // but it might still be interesting to see whether any of the ImageHeader data mutated.
1331 const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header_) + offset;
1332 uint8_t* remote_ptr = &remote_contents_[offset];
1333
1334 virtual_page_idx = reinterpret_cast<uintptr_t>(local_ptr) / kPageSize;
1335
1336 // Calculate the page index, relative to the 0th page where the image begins
1337 page_idx = (offset + page_off_begin) / kPageSize;
1338 if (*local_ptr != *remote_ptr) {
1339 // Track number of bytes that are different
David Sehrb4005f02017-06-20 19:11:40 -07001340 mapping_data->different_bytes++;
David Sehr50005a02017-06-21 13:24:21 -07001341 }
1342
1343 // Independently count the # of dirty pages on the remote side
1344 size_t remote_virtual_page_idx = begin / kPageSize;
1345 if (previous_page_idx != page_idx) {
1346 uint64_t page_count = 0xC0FFEE;
1347 // TODO: virtual_page_idx needs to be from the same process
1348 std::string error_msg;
1349         int dirtiness = (IsPageDirty(&pagemap_file_,  // Image-diff-pid pagemap
1350                                      &clean_pagemap_file_,  // Self pagemap
1351 &kpageflags_file_,
1352 &kpagecount_file_,
1353 remote_virtual_page_idx, // potentially "dirty" page
1354 virtual_page_idx, // true "clean" page
1355 &page_count,
1356 &error_msg));
1357 if (dirtiness < 0) {
1358 os << error_msg;
1359 return false;
1360 } else if (dirtiness > 0) {
David Sehrb4005f02017-06-20 19:11:40 -07001361 mapping_data->dirty_pages++;
1362 mapping_data->dirty_page_set.insert(mapping_data->dirty_page_set.end(), virtual_page_idx);
David Sehr50005a02017-06-21 13:24:21 -07001363 }
1364
1365 bool is_dirty = dirtiness > 0;
1366 bool is_private = page_count == 1;
1367
1368 if (page_count == 1) {
David Sehrb4005f02017-06-20 19:11:40 -07001369 mapping_data->private_pages++;
David Sehr50005a02017-06-21 13:24:21 -07001370 }
1371
1372 if (is_dirty && is_private) {
David Sehrb4005f02017-06-20 19:11:40 -07001373 mapping_data->private_dirty_pages++;
Mathieu Chartier728f8502017-07-28 17:35:30 -07001374 for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
1375 const ImageHeader::ImageSections section = static_cast<ImageHeader::ImageSections>(i);
1376 if (image_header_.GetImageSection(section).Contains(offset)) {
1377 ++private_dirty_pages_for_section[i];
1378 }
1379 }
David Sehr50005a02017-06-21 13:24:21 -07001380 }
1381 }
1382 }
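    // "False dirty" pages below are pages counted as dirty above (their page frame no longer
    // matches the clean mapping) but whose bytes are identical to the clean image, e.g. pages
    // that were copied or written to without actually diverging from boot.art.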
David Sehrb4005f02017-06-20 19:11:40 -07001383 mapping_data->false_dirty_pages = mapping_data->dirty_pages - mapping_data->different_pages;
1384 // Print low-level (bytes, int32s, pages) statistics.
1385 os << mapping_data->different_bytes << " differing bytes,\n "
1386 << mapping_data->different_int32s << " differing int32s,\n "
1387 << mapping_data->different_pages << " differing pages,\n "
1388 << mapping_data->dirty_pages << " pages are dirty;\n "
1389 << mapping_data->false_dirty_pages << " pages are false dirty;\n "
1390 << mapping_data->private_pages << " pages are private;\n "
Mathieu Chartier728f8502017-07-28 17:35:30 -07001391 << mapping_data->private_dirty_pages << " pages are Private_Dirty\n "
1392 << "\n";
1393
1394 size_t total_private_dirty_pages = std::accumulate(private_dirty_pages_for_section.begin(),
1395 private_dirty_pages_for_section.end(),
1396 0u);
1397 os << "Image sections (total private dirty pages " << total_private_dirty_pages << ")\n";
1398 for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
1399 const ImageHeader::ImageSections section = static_cast<ImageHeader::ImageSections>(i);
1400 os << section << " " << image_header_.GetImageSection(section)
1401 << " private dirty pages=" << private_dirty_pages_for_section[i] << "\n";
1402 }
1403 os << "\n";
David Sehrb4005f02017-06-20 19:11:40 -07001404
David Sehr50005a02017-06-21 13:24:21 -07001405 return true;
1406 }
1407
David Sehr50005a02017-06-21 13:24:21 -07001408  // Look at /proc/$pid/mem and only diff the boot image mapping found there.
1409 bool DumpImageDiffMap()
David Sehrb4005f02017-06-20 19:11:40 -07001410 REQUIRES_SHARED(Locks::mutator_lock_) {
David Sehr50005a02017-06-21 13:24:21 -07001411 std::ostream& os = *os_;
Igor Murashkin37743352014-11-13 14:38:00 -08001412 std::string error_msg;
1413
1414 // Walk the bytes and diff against our boot image
Igor Murashkin37743352014-11-13 14:38:00 -08001415 os << "\nObserving boot image header at address "
David Sehr50005a02017-06-21 13:24:21 -07001416 << reinterpret_cast<const void*>(&image_header_)
Igor Murashkin37743352014-11-13 14:38:00 -08001417 << "\n\n";
1418
David Sehr50005a02017-06-21 13:24:21 -07001419 const uint8_t* image_begin_unaligned = image_header_.GetImageBegin();
David Sehr50005a02017-06-21 13:24:21 -07001420 const uint8_t* image_end_unaligned = image_begin_unaligned + image_header_.GetImageSize();
Igor Murashkin37743352014-11-13 14:38:00 -08001421
1422 // Adjust range to nearest page
1423 const uint8_t* image_begin = AlignDown(image_begin_unaligned, kPageSize);
1424 const uint8_t* image_end = AlignUp(image_end_unaligned, kPageSize);
1425
David Sehr50005a02017-06-21 13:24:21 -07001426 if (reinterpret_cast<uintptr_t>(image_begin) > boot_map_.start ||
1427 reinterpret_cast<uintptr_t>(image_end) < boot_map_.end) {
Igor Murashkin37743352014-11-13 14:38:00 -08001428 // Sanity check that we aren't trying to read a completely different boot image
1429 os << "Remote boot map is out of range of local boot map: " <<
1430 "local begin " << reinterpret_cast<const void*>(image_begin) <<
1431 ", local end " << reinterpret_cast<const void*>(image_end) <<
David Sehr50005a02017-06-21 13:24:21 -07001432 ", remote begin " << reinterpret_cast<const void*>(boot_map_.start) <<
1433 ", remote end " << reinterpret_cast<const void*>(boot_map_.end);
Igor Murashkin37743352014-11-13 14:38:00 -08001434 return false;
1435 // If we wanted even more validation we could map the ImageHeader from the file
1436 }
1437
David Sehrb4005f02017-06-20 19:11:40 -07001438 MappingData mapping_data;
David Sehr45de57f2017-06-21 05:03:22 +00001439
David Sehrb4005f02017-06-20 19:11:40 -07001440 os << "Mapping at [" << reinterpret_cast<void*>(boot_map_.start) << ", "
1441 << reinterpret_cast<void*>(boot_map_.end) << ") had:\n ";
1442 if (!ComputeDirtyBytes(image_begin, &mapping_data)) {
David Sehr50005a02017-06-21 13:24:21 -07001443 return false;
Igor Murashkin37743352014-11-13 14:38:00 -08001444 }
David Sehrb4005f02017-06-20 19:11:40 -07001445 RemoteProcesses remotes;
David Sehr20e271a2017-06-14 13:02:14 -07001446 if (zygote_pid_only_) {
David Sehrb4005f02017-06-20 19:11:40 -07001447 remotes = RemoteProcesses::kZygoteOnly;
1448 } else if (zygote_diff_pid_ > 0) {
1449 remotes = RemoteProcesses::kImageAndZygote;
David Sehr20e271a2017-06-14 13:02:14 -07001450 } else {
David Sehrb4005f02017-06-20 19:11:40 -07001451 remotes = RemoteProcesses::kImageOnly;
Mathieu Chartiercb044bc2016-04-01 13:56:41 -07001452 }
1453
David Sehra49e0532017-08-25 08:05:29 -07001454 // Check all the mirror::Object entries in the image.
1455 RegionData<mirror::Object> object_region_data(os_,
1456 &remote_contents_,
1457 &zygote_contents_,
1458 boot_map_,
1459 image_header_,
1460 dump_dirty_objects_);
David Sehrb4005f02017-06-20 19:11:40 -07001461 object_region_data.ProcessRegion(mapping_data,
1462 remotes,
David Sehra49e0532017-08-25 08:05:29 -07001463 image_begin_unaligned);
Igor Murashkin37743352014-11-13 14:38:00 -08001464
David Sehra49e0532017-08-25 08:05:29 -07001465 // Check all the ArtMethod entries in the image.
1466 RegionData<ArtMethod> artmethod_region_data(os_,
1467 &remote_contents_,
1468 &zygote_contents_,
1469 boot_map_,
1470 image_header_,
1471 dump_dirty_objects_);
1472 artmethod_region_data.ProcessRegion(mapping_data,
1473 remotes,
1474 image_begin_unaligned);
Igor Murashkin37743352014-11-13 14:38:00 -08001475 return true;
1476 }
1477
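  // Reads the 64-bit /proc/<pid>/pagemap entry for |virtual_page_index| and extracts the
  // physical page frame number (PFN). A minimal decode sketch, assuming the entry layout from
  // Documentation/vm/pagemap.txt (bits 0-54: PFN, bit 55: soft-dirty, bit 63: page present);
  // ReadU64At is a hypothetical helper, not part of this file:
  //
  //   uint64_t entry = ReadU64At(pagemap, virtual_page_index * sizeof(uint64_t));
  //   uint64_t pfn          = entry & ((1ULL << 55) - 1);   // bits 0-54
  //   bool soft_dirty       = (entry & (1ULL << 55)) != 0;  // bit 55
  //   bool page_present     = (entry & (1ULL << 63)) != 0;  // bit 63 (not checked here)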
Igor Murashkin37743352014-11-13 14:38:00 -08001478 static bool GetPageFrameNumber(File* page_map_file,
1479 size_t virtual_page_index,
1480 uint64_t* page_frame_number,
1481 std::string* error_msg) {
1482 CHECK(page_map_file != nullptr);
1483 CHECK(page_frame_number != nullptr);
1484 CHECK(error_msg != nullptr);
1485
1486 constexpr size_t kPageMapEntrySize = sizeof(uint64_t);
1487 constexpr uint64_t kPageFrameNumberMask = (1ULL << 55) - 1; // bits 0-54 [in /proc/$pid/pagemap]
1488 constexpr uint64_t kPageSoftDirtyMask = (1ULL << 55); // bit 55 [in /proc/$pid/pagemap]
1489
1490 uint64_t page_map_entry = 0;
1491
1492 // Read 64-bit entry from /proc/$pid/pagemap to get the physical page frame number
1493 if (!page_map_file->PreadFully(&page_map_entry, kPageMapEntrySize,
1494 virtual_page_index * kPageMapEntrySize)) {
1495 *error_msg = StringPrintf("Failed to read the virtual page index entry from %s",
1496 page_map_file->GetPath().c_str());
1497 return false;
1498 }
1499
1500 // TODO: seems useless, remove this.
1501 bool soft_dirty = (page_map_entry & kPageSoftDirtyMask) != 0;
1502 if ((false)) {
1503 LOG(VERBOSE) << soft_dirty; // Suppress unused warning
1504 UNREACHABLE();
1505 }
1506
1507 *page_frame_number = page_map_entry & kPageFrameNumberMask;
1508
1509 return true;
1510 }
1511
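  // Decides whether the remote page has diverged from the pristine, file-backed boot image page
  // by comparing physical page frames: if the remote virtual page and the equivalent page in
  // this process are backed by different frames, the remote copy is no longer shared with the
  // clean mapping. A rough sketch (PfnOf stands in for GetPageFrameNumber above; error handling
  // omitted):
  //
  //   uint64_t remote_pfn = PfnOf(page_map_file, virtual_page_idx);
  //   uint64_t clean_pfn  = PfnOf(clean_pagemap_file, clean_virtual_page_idx);
  //   return remote_pfn != clean_pfn;  // 1 = dirty, 0 = clean
  //
  // /proc/kpageflags is consulted for consistency checks, and /proc/kpagecount supplies the
  // frame's map count (how many times it is mapped) via |page_count|.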
1512 static int IsPageDirty(File* page_map_file,
David Sehr50005a02017-06-21 13:24:21 -07001513 File* clean_pagemap_file,
1514 File* kpageflags_file,
1515 File* kpagecount_file,
Igor Murashkin37743352014-11-13 14:38:00 -08001516 size_t virtual_page_idx,
1517 size_t clean_virtual_page_idx,
1518 // Out parameters:
1519 uint64_t* page_count, std::string* error_msg) {
1520 CHECK(page_map_file != nullptr);
David Sehr50005a02017-06-21 13:24:21 -07001521 CHECK(clean_pagemap_file != nullptr);
1522 CHECK_NE(page_map_file, clean_pagemap_file);
1523 CHECK(kpageflags_file != nullptr);
1524 CHECK(kpagecount_file != nullptr);
Igor Murashkin37743352014-11-13 14:38:00 -08001525 CHECK(page_count != nullptr);
1526 CHECK(error_msg != nullptr);
1527
1528 // Constants are from https://www.kernel.org/doc/Documentation/vm/pagemap.txt
1529
1530 constexpr size_t kPageFlagsEntrySize = sizeof(uint64_t);
1531 constexpr size_t kPageCountEntrySize = sizeof(uint64_t);
1532 constexpr uint64_t kPageFlagsDirtyMask = (1ULL << 4); // in /proc/kpageflags
1533 constexpr uint64_t kPageFlagsNoPageMask = (1ULL << 20); // in /proc/kpageflags
1534 constexpr uint64_t kPageFlagsMmapMask = (1ULL << 11); // in /proc/kpageflags
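    // These correspond to KPF_DIRTY (bit 4), KPF_NOPAGE (bit 20) and KPF_MMAP (bit 11) in the
    // kernel's include/uapi/linux/kernel-page-flags.h.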
1535
1536 uint64_t page_frame_number = 0;
1537 if (!GetPageFrameNumber(page_map_file, virtual_page_idx, &page_frame_number, error_msg)) {
1538 return -1;
1539 }
1540
1541 uint64_t page_frame_number_clean = 0;
David Sehr50005a02017-06-21 13:24:21 -07001542 if (!GetPageFrameNumber(clean_pagemap_file, clean_virtual_page_idx, &page_frame_number_clean,
Igor Murashkin37743352014-11-13 14:38:00 -08001543 error_msg)) {
1544 return -1;
1545 }
1546
1547 // Read 64-bit entry from /proc/kpageflags to get the dirty bit for a page
1548 uint64_t kpage_flags_entry = 0;
David Sehr50005a02017-06-21 13:24:21 -07001549 if (!kpageflags_file->PreadFully(&kpage_flags_entry,
Igor Murashkin37743352014-11-13 14:38:00 -08001550 kPageFlagsEntrySize,
1551 page_frame_number * kPageFlagsEntrySize)) {
1552 *error_msg = StringPrintf("Failed to read the page flags from %s",
David Sehr50005a02017-06-21 13:24:21 -07001553 kpageflags_file->GetPath().c_str());
Igor Murashkin37743352014-11-13 14:38:00 -08001554 return -1;
1555 }
1556
1557     // Read 64-bit entry from /proc/kpagecount to get the mapping count for a page
David Sehr50005a02017-06-21 13:24:21 -07001558 if (!kpagecount_file->PreadFully(page_count /*out*/,
Igor Murashkin37743352014-11-13 14:38:00 -08001559 kPageCountEntrySize,
1560 page_frame_number * kPageCountEntrySize)) {
1561 *error_msg = StringPrintf("Failed to read the page count from %s",
David Sehr50005a02017-06-21 13:24:21 -07001562 kpagecount_file->GetPath().c_str());
Igor Murashkin37743352014-11-13 14:38:00 -08001563 return -1;
1564 }
1565
1566 // There must be a page frame at the requested address.
1567 CHECK_EQ(kpage_flags_entry & kPageFlagsNoPageMask, 0u);
1568 // The page frame must be memory mapped
1569 CHECK_NE(kpage_flags_entry & kPageFlagsMmapMask, 0u);
1570
1571 // Page is dirty, i.e. has diverged from file, if the 4th bit is set to 1
1572 bool flags_dirty = (kpage_flags_entry & kPageFlagsDirtyMask) != 0;
1573
1574 // page_frame_number_clean must come from the *same* process
1575 // but a *different* mmap than page_frame_number
1576 if (flags_dirty) {
1577 CHECK_NE(page_frame_number, page_frame_number_clean);
1578 }
1579
1580 return page_frame_number != page_frame_number_clean;
1581 }
1582
David Sehr50005a02017-06-21 13:24:21 -07001583 void PrintPidLine(const std::string& kind, pid_t pid) {
1584 if (pid < 0) {
1585 *os_ << kind << " DIFF PID: disabled\n\n";
1586 } else {
1587 *os_ << kind << " DIFF PID (" << pid << "): ";
1588 }
1589 }
1590
1591 static bool EndsWith(const std::string& str, const std::string& suffix) {
1592 return str.size() >= suffix.size() &&
1593 str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
1594 }
1595
1596 // Return suffix of the file path after the last /. (e.g. /foo/bar -> bar, bar -> bar)
1597 static std::string BaseName(const std::string& str) {
1598 size_t idx = str.rfind('/');
1599 if (idx == std::string::npos) {
1600 return str;
1601 }
1602
1603 return str.substr(idx + 1);
1604 }
1605
Igor Murashkin37743352014-11-13 14:38:00 -08001606 // Return the image location, stripped of any directories, e.g. "boot.art" or "core.art"
1607 std::string GetImageLocationBaseName() const {
1608 return BaseName(std::string(image_location_));
1609 }
1610
1611 std::ostream* os_;
1612 const ImageHeader& image_header_;
Andreas Gampe8994a042015-12-30 19:03:17 +00001613 const std::string image_location_;
Igor Murashkin37743352014-11-13 14:38:00 -08001614 pid_t image_diff_pid_; // Dump image diff against boot.art if pid is non-negative
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001615 pid_t zygote_diff_pid_; // Dump image diff against zygote boot.art if pid is non-negative
Jeff Haoc23b0c02017-07-27 18:19:38 -07001616 bool dump_dirty_objects_; // Adds dumping of objects that are dirty.
David Sehr20e271a2017-06-14 13:02:14 -07001617 bool zygote_pid_only_; // The user only specified a pid for the zygote.
Igor Murashkin37743352014-11-13 14:38:00 -08001618
David Sehr50005a02017-06-21 13:24:21 -07001619 // BacktraceMap used for finding the memory mapping of the image file.
1620 std::unique_ptr<BacktraceMap> proc_maps_;
1621 // Boot image mapping.
Igor Murashkin5573c372017-11-16 13:34:30 -08001622 backtrace_map_t boot_map_{};
David Sehr50005a02017-06-21 13:24:21 -07001623 // The size of the boot image mapping.
1624 size_t boot_map_size_;
1625   // The contents of the boot image mapping in the <image_diff_pid_> process (read from its /proc/<pid>/mem).
1626   std::vector<uint8_t> remote_contents_;
1627   // The contents of the boot image mapping in the <zygote_diff_pid_> process (read from its /proc/<pid>/mem).
1628   std::vector<uint8_t> zygote_contents_;
1629   // A File for reading /proc/<image_diff_pid_>/pagemap.
1630 File pagemap_file_;
1631 // A File for reading /proc/self/pagemap.
1632 File clean_pagemap_file_;
1633 // A File for reading /proc/kpageflags.
1634 File kpageflags_file_;
1635 // A File for reading /proc/kpagecount.
1636 File kpagecount_file_;
1637
Igor Murashkin37743352014-11-13 14:38:00 -08001638 DISALLOW_COPY_AND_ASSIGN(ImgDiagDumper);
1639};
1640
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001641static int DumpImage(Runtime* runtime,
1642 std::ostream* os,
1643 pid_t image_diff_pid,
Jeff Haoc23b0c02017-07-27 18:19:38 -07001644 pid_t zygote_diff_pid,
1645 bool dump_dirty_objects) {
Igor Murashkin37743352014-11-13 14:38:00 -08001646 ScopedObjectAccess soa(Thread::Current());
1647 gc::Heap* heap = runtime->GetHeap();
Jeff Haodcdc85b2015-12-04 14:06:18 -08001648 std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
1649 CHECK(!image_spaces.empty());
1650 for (gc::space::ImageSpace* image_space : image_spaces) {
1651 const ImageHeader& image_header = image_space->GetImageHeader();
1652 if (!image_header.IsValid()) {
1653 fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
1654 return EXIT_FAILURE;
1655 }
1656
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001657 ImgDiagDumper img_diag_dumper(os,
1658 image_header,
1659 image_space->GetImageLocation(),
1660 image_diff_pid,
Jeff Haoc23b0c02017-07-27 18:19:38 -07001661 zygote_diff_pid,
1662 dump_dirty_objects);
David Sehr50005a02017-06-21 13:24:21 -07001663 if (!img_diag_dumper.Init()) {
1664 return EXIT_FAILURE;
1665 }
Jeff Haodcdc85b2015-12-04 14:06:18 -08001666 if (!img_diag_dumper.Dump()) {
1667 return EXIT_FAILURE;
1668 }
Igor Murashkin37743352014-11-13 14:38:00 -08001669 }
Jeff Haodcdc85b2015-12-04 14:06:18 -08001670 return EXIT_SUCCESS;
Igor Murashkin37743352014-11-13 14:38:00 -08001671}
1672
1673struct ImgDiagArgs : public CmdlineArgs {
1674 protected:
1675 using Base = CmdlineArgs;
1676
Vladimir Marko8581e2a2019-02-06 15:54:55 +00001677 ParseStatus ParseCustom(const char* raw_option,
1678 size_t raw_option_length,
1679 std::string* error_msg) override {
1680 DCHECK_EQ(strlen(raw_option), raw_option_length);
Igor Murashkin37743352014-11-13 14:38:00 -08001681 {
Vladimir Marko8581e2a2019-02-06 15:54:55 +00001682 ParseStatus base_parse = Base::ParseCustom(raw_option, raw_option_length, error_msg);
Igor Murashkin37743352014-11-13 14:38:00 -08001683 if (base_parse != kParseUnknownArgument) {
1684 return base_parse;
1685 }
1686 }
1687
Vladimir Marko8581e2a2019-02-06 15:54:55 +00001688 std::string_view option(raw_option, raw_option_length);
1689 if (StartsWith(option, "--image-diff-pid=")) {
1690 const char* image_diff_pid = raw_option + strlen("--image-diff-pid=");
Igor Murashkin37743352014-11-13 14:38:00 -08001691
Andreas Gampef9411702018-09-06 17:16:57 -07001692 if (!android::base::ParseInt(image_diff_pid, &image_diff_pid_)) {
Igor Murashkin37743352014-11-13 14:38:00 -08001693 *error_msg = "Image diff pid out of range";
1694 return kParseError;
1695 }
Vladimir Marko8581e2a2019-02-06 15:54:55 +00001696 } else if (StartsWith(option, "--zygote-diff-pid=")) {
1697 const char* zygote_diff_pid = raw_option + strlen("--zygote-diff-pid=");
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001698
Andreas Gampef9411702018-09-06 17:16:57 -07001699 if (!android::base::ParseInt(zygote_diff_pid, &zygote_diff_pid_)) {
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001700 *error_msg = "Zygote diff pid out of range";
1701 return kParseError;
1702 }
Jeff Haoc23b0c02017-07-27 18:19:38 -07001703 } else if (option == "--dump-dirty-objects") {
1704 dump_dirty_objects_ = true;
Igor Murashkin37743352014-11-13 14:38:00 -08001705 } else {
1706 return kParseUnknownArgument;
1707 }
1708
1709 return kParseOk;
1710 }
1711
Roland Levillainf73caca2018-08-24 17:19:07 +01001712 ParseStatus ParseChecks(std::string* error_msg) override {
Igor Murashkin37743352014-11-13 14:38:00 -08001713 // Perform the parent checks.
1714 ParseStatus parent_checks = Base::ParseChecks(error_msg);
1715 if (parent_checks != kParseOk) {
1716 return parent_checks;
1717 }
1718
1719 // Perform our own checks.
1720
1721 if (kill(image_diff_pid_,
1722 /*sig*/0) != 0) { // No signal is sent, perform error-checking only.
1723 // Check if the pid exists before proceeding.
1724 if (errno == ESRCH) {
1725 *error_msg = "Process specified does not exist";
1726 } else {
1727 *error_msg = StringPrintf("Failed to check process status: %s", strerror(errno));
1728 }
1729 return kParseError;
Andreas Gampe8fae4b52017-09-27 20:04:47 -07001730 } else if (instruction_set_ != InstructionSet::kNone && instruction_set_ != kRuntimeISA) {
Igor Murashkin37743352014-11-13 14:38:00 -08001731 // Don't allow different ISAs since the images are ISA-specific.
1732 // Right now the code assumes both the runtime ISA and the remote ISA are identical.
1733 *error_msg = "Must use the default runtime ISA; changing ISA is not supported.";
1734 return kParseError;
1735 }
1736
1737 return kParseOk;
1738 }
1739
Andreas Gampefa6a1b02018-09-07 08:11:55 -07001740 std::string GetUsage() const override {
Igor Murashkin37743352014-11-13 14:38:00 -08001741 std::string usage;
1742
1743 usage +=
1744 "Usage: imgdiag [options] ...\n"
1745 " Example: imgdiag --image-diff-pid=$(pidof dex2oat)\n"
1746 " Example: adb shell imgdiag --image-diff-pid=$(pid zygote)\n"
1747 "\n";
1748
1749 usage += Base::GetUsage();
1750
1751 usage += // Optional.
1752 " --image-diff-pid=<pid>: provide the PID of a process whose boot.art you want to diff.\n"
1753 " Example: --image-diff-pid=$(pid zygote)\n"
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001754 " --zygote-diff-pid=<pid>: provide the PID of the zygote whose boot.art you want to diff "
1755 "against.\n"
1756 " Example: --zygote-diff-pid=$(pid zygote)\n"
Jeff Haoc23b0c02017-07-27 18:19:38 -07001757 " --dump-dirty-objects: additionally output dirty objects of interest.\n"
Igor Murashkin37743352014-11-13 14:38:00 -08001758 "\n";
1759
1760 return usage;
1761 }
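  // Illustrative end-to-end invocation (the process names are placeholders, not taken from this
  // file; adjust to whatever is running on the device):
  //   adb shell 'imgdiag --image-diff-pid=$(pidof system_server) \
  //       --zygote-diff-pid=$(pidof zygote) --dump-dirty-objects'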
1762
1763 public:
1764 pid_t image_diff_pid_ = -1;
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001765 pid_t zygote_diff_pid_ = -1;
Jeff Haoc23b0c02017-07-27 18:19:38 -07001766 bool dump_dirty_objects_ = false;
Igor Murashkin37743352014-11-13 14:38:00 -08001767};
1768
1769struct ImgDiagMain : public CmdlineMain<ImgDiagArgs> {
Andreas Gampefa6a1b02018-09-07 08:11:55 -07001770 bool ExecuteWithRuntime(Runtime* runtime) override {
Igor Murashkin37743352014-11-13 14:38:00 -08001771 CHECK(args_ != nullptr);
1772
1773 return DumpImage(runtime,
Igor Murashkin37743352014-11-13 14:38:00 -08001774 args_->os_,
Mathieu Chartierc5196cd2016-04-08 14:08:37 -07001775 args_->image_diff_pid_,
Jeff Haoc23b0c02017-07-27 18:19:38 -07001776 args_->zygote_diff_pid_,
1777 args_->dump_dirty_objects_) == EXIT_SUCCESS;
Igor Murashkin37743352014-11-13 14:38:00 -08001778 }
1779};
1780
1781} // namespace art
1782
1783int main(int argc, char** argv) {
1784 art::ImgDiagMain main;
1785 return main.Main(argc, argv);
1786}