/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "perfetto_hprof"

#include "perfetto_hprof.h"

#include <android-base/logging.h>
#include <fcntl.h>
#include <inttypes.h>
#include <sched.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <thread>
#include <time.h>

#include "gc/heap-visit-objects-inl.h"
#include "gc/heap.h"
#include "gc/scoped_gc_critical_section.h"
#include "mirror/object-refvisitor-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "perfetto/profiling/normalize.h"
#include "perfetto/trace/interned_data/interned_data.pbzero.h"
#include "perfetto/trace/profiling/heap_graph.pbzero.h"
#include "perfetto/trace/profiling/profile_common.pbzero.h"
#include "perfetto/config/profiling/java_hprof_config.pbzero.h"
#include "perfetto/protozero/packed_repeated_fields.h"
#include "perfetto/tracing.h"
#include "runtime-inl.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"
#include "dex/descriptors_names.h"

// There are three threads involved in this:
// * listener thread: this is idle in the background when this plugin gets loaded, and waits
//   for data on g_signal_pipe_fds.
// * signal thread: an arbitrary thread that handles the signal and writes data to
//   g_signal_pipe_fds.
// * perfetto producer thread: once the signal is received, the app forks. In the newly forked
//   child, the Perfetto Client API spawns a thread to communicate with traced.
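//
// The threads coordinate through g_state (guarded by GetStateMutex()), which moves
// kUninitialized -> kWaitForListener -> kWaitForStart -> kStart -> kEnd as the plugin
// initializes, the listener thread attaches, tracing starts, and the dump finishes.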

namespace perfetto_hprof {

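// Signal used to request a Java heap dump from this process. The handler
// installed in ArtPlugin_Initialize below only writes a byte to
// g_signal_pipe_fds to wake up the listener thread.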
constexpr int kJavaHeapprofdSignal = __SIGRTMIN + 6;
constexpr time_t kWatchdogTimeoutSec = 120;
// This needs to be lower than the maximum acceptable chunk size, because this
// is checked *before* writing another submessage. We conservatively assume
// submessages can be up to 100k here for a 500k chunk size.
// DropBox has a 500k chunk limit, and each chunk needs to parse as a proto.
constexpr uint32_t kPacketSizeThreshold = 400000;
constexpr char kByte[1] = {'x'};
static art::Mutex& GetStateMutex() {
  static art::Mutex state_mutex("perfetto_hprof_state_mutex", art::LockLevel::kGenericBottomLock);
  return state_mutex;
}

static art::ConditionVariable& GetStateCV() {
  static art::ConditionVariable state_cv("perfetto_hprof_state_cv", GetStateMutex());
  return state_cv;
}

static State g_state = State::kUninitialized;

// Pipe to signal from the signal handler into a worker thread that handles the
// dump requests.
int g_signal_pipe_fds[2];
static struct sigaction g_orig_act = {};

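// Returns the intern id for |s| in |m|, appending a new entry (with the next
// sequential id, i.e. the current map size) if it is not already present.
// Example: with m = {{"", 0}}, FindOrAppend(&m, "foo.bar") inserts the string
// with id 1 and returns 1; a second call with "foo.bar" returns 1 again.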
uint64_t FindOrAppend(std::map<std::string, uint64_t>* m,
                      const std::string& s) {
  auto it = m->find(s);
  if (it == m->end()) {
    std::tie(it, std::ignore) = m->emplace(s, m->size());
  }
  return it->second;
}

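// Arms a one-shot CLOCK_MONOTONIC timer that delivers SIGKILL to this process
// after kWatchdogTimeoutSec. This is only ever called in the forked child, so
// a stuck dump kills the dumper process, not the app being profiled.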
void ArmWatchdogOrDie() {
  timer_t timerid{};
  struct sigevent sev {};
  sev.sigev_notify = SIGEV_SIGNAL;
  sev.sigev_signo = SIGKILL;

  if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1) {
    // This only gets called in the child, so we can fatal without impacting
    // the app.
    PLOG(FATAL) << "failed to create watchdog timer";
  }

  struct itimerspec its {};
  its.it_value.tv_sec = kWatchdogTimeoutSec;

  if (timer_settime(timerid, 0, &its, nullptr) == -1) {
    // This only gets called in the child, so we can fatal without impacting
    // the app.
    PLOG(FATAL) << "failed to arm watchdog timer";
  }
}

constexpr size_t kMaxCmdlineSize = 512;

class JavaHprofDataSource : public perfetto::DataSource<JavaHprofDataSource> {
 public:
  constexpr static perfetto::BufferExhaustedPolicy kBufferExhaustedPolicy =
      perfetto::BufferExhaustedPolicy::kStall;
  void OnSetup(const SetupArgs& args) override {
    // This is on the heap as it triggers -Wframe-larger-than.
    std::unique_ptr<perfetto::protos::pbzero::JavaHprofConfig::Decoder> cfg(
        new perfetto::protos::pbzero::JavaHprofConfig::Decoder(
            args.config->java_hprof_config_raw()));

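    // Only enable the data source if this process is named in the config,
    // either by pid or by (normalized) cmdline. A trace config selecting this
    // process might look roughly like this (sketch; "com.example.app" is a
    // placeholder, see JavaHprofConfig for the exact fields):
    //   data_sources {
    //     config {
    //       name: "android.java_hprof"
    //       java_hprof_config { process_cmdline: "com.example.app" }
    //     }
    //   }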
    uint64_t self_pid = static_cast<uint64_t>(getpid());
    for (auto pid_it = cfg->pid(); pid_it; ++pid_it) {
      if (*pid_it == self_pid) {
        enabled_ = true;
        return;
      }
    }

    if (cfg->has_process_cmdline()) {
      int fd = open("/proc/self/cmdline", O_RDONLY | O_CLOEXEC);
      if (fd == -1) {
        PLOG(ERROR) << "failed to open /proc/self/cmdline";
        return;
      }
      char cmdline[kMaxCmdlineSize];
      ssize_t rd = read(fd, cmdline, sizeof(cmdline) - 1);
      if (rd == -1) {
        PLOG(ERROR) << "failed to read /proc/self/cmdline";
      }
      close(fd);
      if (rd == -1) {
        return;
      }
      cmdline[rd] = '\0';
      char* cmdline_ptr = cmdline;
      ssize_t sz = perfetto::profiling::NormalizeCmdLine(&cmdline_ptr, static_cast<size_t>(rd + 1));
      if (sz == -1) {
        PLOG(ERROR) << "failed to normalize cmdline";
      }
      for (auto it = cfg->process_cmdline(); it; ++it) {
        std::string other = (*it).ToStdString();
        // Append \0 to make this a C string.
        other.resize(other.size() + 1);
        char* other_ptr = &(other[0]);
        ssize_t other_sz = perfetto::profiling::NormalizeCmdLine(&other_ptr, other.size());
        if (other_sz == -1) {
          PLOG(ERROR) << "failed to normalize other cmdline";
          continue;
        }
        if (sz == other_sz && strncmp(cmdline_ptr, other_ptr, static_cast<size_t>(sz)) == 0) {
          enabled_ = true;
          return;
        }
      }
    }
  }

  bool enabled() { return enabled_; }

  void OnStart(const StartArgs&) override {
    if (!enabled()) {
      return;
    }
    art::MutexLock lk(art_thread(), GetStateMutex());
    if (g_state == State::kWaitForStart) {
      g_state = State::kStart;
      GetStateCV().Broadcast(art_thread());
    }
  }

  void OnStop(const StopArgs&) override {}

  static art::Thread* art_thread() {
    // TODO(fmayer): Attach the Perfetto producer thread to ART and give it a name. This is
    // not trivial, we cannot just attach the first time this method is called, because
    // AttachCurrentThread deadlocks with the ConditionVariable::Wait in WaitForDataSource.
    //
    // We should attach the thread as soon as the Client API spawns it, but that needs more
    // complicated plumbing.
    return nullptr;
  }

 private:
  bool enabled_ = false;
  static art::Thread* self_;
};

art::Thread* JavaHprofDataSource::self_ = nullptr;

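// Registers the "android.java_hprof" data source with the system tracing
// backend and blocks until a tracing session targeting this process calls
// OnStart, i.e. until g_state reaches kStart. Runs in the forked child.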
void WaitForDataSource(art::Thread* self) {
  perfetto::TracingInitArgs args;
  args.backends = perfetto::BackendType::kSystemBackend;
  perfetto::Tracing::Initialize(args);

  perfetto::DataSourceDescriptor dsd;
  dsd.set_name("android.java_hprof");
  JavaHprofDataSource::Register(dsd);

  LOG(INFO) << "waiting for data source";

  art::MutexLock lk(self, GetStateMutex());
  while (g_state != State::kStart) {
    GetStateCV().Wait(self);
  }
}

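// Splits the heap graph across multiple TracePackets so that no single packet
// grows past kPacketSizeThreshold. Every packet carries a HeapGraph with the
// same pid and timestamp, an increasing index, and continued=true on all but
// the last one.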
class Writer {
 public:
  Writer(pid_t parent_pid, JavaHprofDataSource::TraceContext* ctx, uint64_t timestamp)
      : parent_pid_(parent_pid), ctx_(ctx), timestamp_(timestamp),
        last_written_(ctx_->written()) {}

  // Return whether the next call to GetHeapGraph will create a new TracePacket.
  bool will_create_new_packet() {
    return !heap_graph_ || ctx_->written() - last_written_ > kPacketSizeThreshold;
  }

  perfetto::protos::pbzero::HeapGraph* GetHeapGraph() {
    if (will_create_new_packet()) {
      CreateNewHeapGraph();
    }
    return heap_graph_;
  }

  void CreateNewHeapGraph() {
    if (heap_graph_) {
      heap_graph_->set_continued(true);
    }
    Finalize();

    uint64_t written = ctx_->written();

    trace_packet_ = ctx_->NewTracePacket();
    trace_packet_->set_timestamp(timestamp_);
    heap_graph_ = trace_packet_->set_heap_graph();
    heap_graph_->set_pid(parent_pid_);
    heap_graph_->set_index(index_++);

    last_written_ = written;
  }

  void Finalize() {
    if (trace_packet_) {
      trace_packet_->Finalize();
    }
    heap_graph_ = nullptr;
  }

  ~Writer() { Finalize(); }

 private:
  const pid_t parent_pid_;
  JavaHprofDataSource::TraceContext* const ctx_;
  const uint64_t timestamp_;

  uint64_t last_written_ = 0;

  perfetto::DataSource<JavaHprofDataSource>::TraceContext::TracePacketHandle
      trace_packet_;
  perfetto::protos::pbzero::HeapGraph* heap_graph_ = nullptr;

  uint64_t index_ = 0;
};

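// Visitor passed to art::mirror::Object::VisitReferences. For every reference
// field of the visited object it records a (pretty field name, referenced
// object) pair, which is later emitted as the parallel reference_field_id /
// reference_object_id arrays of the HeapGraphObject proto.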
class ReferredObjectsFinder {
 public:
  explicit ReferredObjectsFinder(
      std::vector<std::pair<std::string, art::mirror::Object*>>* referred_objects)
      : referred_objects_(referred_objects) {}

  // For art::mirror::Object::VisitReferences.
  void operator()(art::ObjPtr<art::mirror::Object> obj, art::MemberOffset offset,
                  bool is_static) const
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::mirror::Object* ref = obj->GetFieldObject<art::mirror::Object>(offset);
    art::ArtField* field;
    if (is_static) {
      field = art::ArtField::FindStaticFieldWithOffset(obj->AsClass(), offset.Uint32Value());
    } else {
      field = art::ArtField::FindInstanceFieldWithOffset(obj->GetClass(), offset.Uint32Value());
    }
    std::string field_name = "";
    if (field != nullptr) {
      field_name = field->PrettyField(/*with_type=*/true);
    }
    referred_objects_->emplace_back(std::move(field_name), ref);
  }

  void VisitRootIfNonNull(art::mirror::CompressedReference<art::mirror::Object>* root
                          ATTRIBUTE_UNUSED) const {}
  void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root
                 ATTRIBUTE_UNUSED) const {}

 private:
  // We can use a raw Object* pointer here, because there are no concurrent GC threads after the
  // fork.
  std::vector<std::pair<std::string, art::mirror::Object*>>* referred_objects_;
};

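// Root visitor that buckets all GC roots by their art::RootType, so that each
// bucket can be emitted as one HeapGraphRoot proto with a packed list of
// object ids.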
class RootFinder : public art::SingleRootVisitor {
 public:
  explicit RootFinder(
    std::map<art::RootType, std::vector<art::mirror::Object*>>* root_objects)
      : root_objects_(root_objects) {}

  void VisitRoot(art::mirror::Object* root, const art::RootInfo& info) override {
    (*root_objects_)[info.GetType()].emplace_back(root);
  }

 private:
  // We can use a raw Object* pointer here, because there are no concurrent GC threads after the
  // fork.
  std::map<art::RootType, std::vector<art::mirror::Object*>>* root_objects_;
};

perfetto::protos::pbzero::HeapGraphRoot::Type ToProtoType(art::RootType art_type) {
  switch (art_type) {
    case art::kRootUnknown:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_UNKNOWN;
    case art::kRootJNIGlobal:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_JNI_GLOBAL;
    case art::kRootJNILocal:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_JNI_LOCAL;
    case art::kRootJavaFrame:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_JAVA_FRAME;
    case art::kRootNativeStack:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_NATIVE_STACK;
    case art::kRootStickyClass:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_STICKY_CLASS;
    case art::kRootThreadBlock:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_THREAD_BLOCK;
    case art::kRootMonitorUsed:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_MONITOR_USED;
    case art::kRootThreadObject:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_THREAD_OBJECT;
    case art::kRootInternedString:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_INTERNED_STRING;
    case art::kRootFinalizing:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_FINALIZING;
    case art::kRootDebugger:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_DEBUGGER;
    case art::kRootReferenceCleanup:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_REFERENCE_CLEANUP;
    case art::kRootVMInternal:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_VM_INTERNAL;
    case art::kRootJNIMonitor:
      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_JNI_MONITOR;
  }
}

std::string PrettyType(art::mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS {
  if (klass == nullptr) {
    return "(raw)";
  }
  std::string temp;
  std::string result(art::PrettyDescriptor(klass->GetDescriptor(&temp)));
  return result;
}

void DumpPerfetto(art::Thread* self) {
  pid_t parent_pid = getpid();
  LOG(INFO) << "preparing to dump heap for " << parent_pid;

  // Need to take a heap dump while GC isn't running. See the comment in
  // Heap::VisitObjects(). Also we need the critical section to avoid visiting
  // the same object twice. See b/34967844.
  //
  // We need to do this before the fork, because otherwise it can deadlock
  // waiting for the GC, as all other threads get terminated by the clone, but
  // their locks are not released.
  art::gc::ScopedGCCriticalSection gcs(self, art::gc::kGcCauseHprof,
                                       art::gc::kCollectorTypeHprof);

  art::ScopedSuspendAll ssa(__FUNCTION__, /* long_suspend=*/ true);

  pid_t pid = fork();
  if (pid == -1) {
    // Fork error.
    PLOG(ERROR) << "fork";
    return;
  }
  if (pid != 0) {
    // Parent
    int stat_loc;
    for (;;) {
      if (waitpid(pid, &stat_loc, 0) != -1 || errno != EINTR) {
        break;
      }
    }
    return;
  }

  // The following code is only executed by the child of the original process.
  //
  // Daemon creates a new process that is the grand-child of the original process, and exits.
  if (daemon(0, 0) == -1) {
    PLOG(FATAL) << "daemon";
  }

  // The following code is only executed by the grand-child of the original process.

  // Make sure that this is the first thing we do after forking, so if anything
  // below hangs, the fork will go away from the watchdog.
  ArmWatchdogOrDie();

  struct timespec ts = {};
  if (clock_gettime(CLOCK_BOOTTIME, &ts) != 0) {
    LOG(FATAL) << "Failed to get boottime.";
  }
  uint64_t timestamp = ts.tv_sec * 1000000000LL + ts.tv_nsec;

  WaitForDataSource(self);

  JavaHprofDataSource::Trace(
      [parent_pid, timestamp](JavaHprofDataSource::TraceContext ctx)
          NO_THREAD_SAFETY_ANALYSIS {
        {
          auto ds = ctx.GetDataSourceLocked();
          if (!ds || !ds->enabled()) {
            LOG(INFO) << "skipping irrelevant data source.";
            return;
          }
        }
        LOG(INFO) << "dumping heap for " << parent_pid;
        Writer writer(parent_pid, &ctx, timestamp);
        // Make sure that intern ID 0 (default proto value for a uint64_t) always maps to ""
        // (default proto value for a string).
        std::map<std::string, uint64_t> interned_fields{{"", 0}};
        std::map<std::string, uint64_t> interned_locations{{"", 0}};

        std::map<art::RootType, std::vector<art::mirror::Object*>> root_objects;
        RootFinder rcf(&root_objects);
        art::Runtime::Current()->VisitRoots(&rcf);
        std::unique_ptr<protozero::PackedVarInt> object_ids(
            new protozero::PackedVarInt);
        for (const auto& p : root_objects) {
          const art::RootType root_type = p.first;
          const std::vector<art::mirror::Object*>& children = p.second;
          perfetto::protos::pbzero::HeapGraphRoot* root_proto =
              writer.GetHeapGraph()->add_roots();
          root_proto->set_root_type(ToProtoType(root_type));
          for (art::mirror::Object* obj : children) {
            if (writer.will_create_new_packet()) {
              root_proto->set_object_ids(*object_ids);
              object_ids->Reset();
              root_proto = writer.GetHeapGraph()->add_roots();
              root_proto->set_root_type(ToProtoType(root_type));
            }
            object_ids->Append(reinterpret_cast<uintptr_t>(obj));
          }
          root_proto->set_object_ids(*object_ids);
          object_ids->Reset();
        }

        std::unique_ptr<protozero::PackedVarInt> reference_field_ids(
            new protozero::PackedVarInt);
        std::unique_ptr<protozero::PackedVarInt> reference_object_ids(
            new protozero::PackedVarInt);

        art::Runtime::Current()->GetHeap()->VisitObjectsPaused(
            [&writer, &interned_fields, &interned_locations,
             &reference_field_ids, &reference_object_ids](
                art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
          if (obj->IsClass()) {
            art::mirror::Class* klass = obj->AsClass().Ptr();
            perfetto::protos::pbzero::HeapGraphType* type_proto =
                writer.GetHeapGraph()->add_types();
            type_proto->set_id(reinterpret_cast<uintptr_t>(klass));
            type_proto->set_class_name(PrettyType(klass));
            type_proto->set_location_id(FindOrAppend(&interned_locations,
                                                     klass->GetLocation()));
          }

          art::mirror::Class* klass = obj->GetClass();
          uintptr_t class_id = reinterpret_cast<uintptr_t>(klass);
          // We need to synthesize a new type for Class<Foo>, which does not exist
          // in the runtime. Otherwise, all the static members of all classes would be
          // attributed to java.lang.Class.
          if (klass->IsClassClass()) {
            CHECK(obj->IsClass());
            perfetto::protos::pbzero::HeapGraphType* type_proto =
                writer.GetHeapGraph()->add_types();
            // All pointers are at least multiples of two, so this way we can make sure
            // we are not colliding with a real class.
            class_id = reinterpret_cast<uintptr_t>(obj) | 1;
            type_proto->set_id(class_id);
            type_proto->set_class_name(obj->PrettyTypeOf());
            type_proto->set_location_id(FindOrAppend(&interned_locations,
                                                     obj->AsClass()->GetLocation()));
          }

          perfetto::protos::pbzero::HeapGraphObject* object_proto =
              writer.GetHeapGraph()->add_objects();
          object_proto->set_id(reinterpret_cast<uintptr_t>(obj));
          object_proto->set_type_id(class_id);
          object_proto->set_self_size(obj->SizeOf());

          std::vector<std::pair<std::string, art::mirror::Object*>>
              referred_objects;
          ReferredObjectsFinder objf(&referred_objects);
          obj->VisitReferences(objf, art::VoidFunctor());
          for (const auto& p : referred_objects) {
            reference_field_ids->Append(FindOrAppend(&interned_fields, p.first));
            reference_object_ids->Append(reinterpret_cast<uintptr_t>(p.second));
          }
          object_proto->set_reference_field_id(*reference_field_ids);
          object_proto->set_reference_object_id(*reference_object_ids);
          reference_field_ids->Reset();
          reference_object_ids->Reset();
        });

        for (const auto& p : interned_fields) {
          const std::string& str = p.first;
          uint64_t id = p.second;

          perfetto::protos::pbzero::InternedString* field_proto =
              writer.GetHeapGraph()->add_field_names();
          field_proto->set_iid(id);
          field_proto->set_str(
              reinterpret_cast<const uint8_t*>(str.c_str()), str.size());
        }
        for (const auto& p : interned_locations) {
          const std::string& str = p.first;
          uint64_t id = p.second;

          perfetto::protos::pbzero::InternedString* location_proto =
              writer.GetHeapGraph()->add_location_names();
          location_proto->set_iid(id);
          location_proto->set_str(reinterpret_cast<const uint8_t*>(str.c_str()),
                                  str.size());
        }

        writer.Finalize();

        ctx.Flush([] {
          {
            art::MutexLock lk(JavaHprofDataSource::art_thread(), GetStateMutex());
            g_state = State::kEnd;
            GetStateCV().Broadcast(JavaHprofDataSource::art_thread());
          }
        });
      });

  art::MutexLock lk(self, GetStateMutex());
  while (g_state != State::kEnd) {
    GetStateCV().Wait(self);
  }
  LOG(INFO) << "finished dumping heap for " << parent_pid;
  // Prevent the atexit handlers from running. We do not want to call cleanup
  // functions the parent process has registered.
  _exit(0);
}

// The plugin initialization function.
extern "C" bool ArtPlugin_Initialize() {
  if (art::Runtime::Current() == nullptr) {
    return false;
  }
  art::Thread* self = art::Thread::Current();
  {
    art::MutexLock lk(self, GetStateMutex());
    if (g_state != State::kUninitialized) {
      LOG(ERROR) << "perfetto_hprof already initialized. state: " << g_state;
      return false;
    }
    g_state = State::kWaitForListener;
  }

  if (pipe2(g_signal_pipe_fds, O_CLOEXEC) == -1) {
    PLOG(ERROR) << "Failed to pipe";
    return false;
  }

  struct sigaction act = {};
  act.sa_flags = SA_SIGINFO | SA_RESTART;
  act.sa_sigaction = [](int, siginfo_t*, void*) {
    if (write(g_signal_pipe_fds[1], kByte, sizeof(kByte)) == -1) {
      PLOG(ERROR) << "Failed to trigger heap dump";
    }
  };

  // TODO(fmayer): We can probably use the SignalCatcher thread here to not
  // have an idle thread.
  if (sigaction(kJavaHeapprofdSignal, &act, &g_orig_act) != 0) {
    close(g_signal_pipe_fds[0]);
    close(g_signal_pipe_fds[1]);
    PLOG(ERROR) << "Failed to sigaction";
    return false;
  }

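  // This is the "listener thread" described at the top of the file: it
  // attaches to the runtime, then blocks on the read end of the signal pipe
  // and runs DumpPerfetto() for every byte the signal handler writes.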
  std::thread th([] {
    art::Runtime* runtime = art::Runtime::Current();
    if (!runtime) {
      LOG(FATAL_WITHOUT_ABORT) << "no runtime in perfetto_hprof_listener";
      return;
    }
    if (!runtime->AttachCurrentThread("perfetto_hprof_listener", /*as_daemon=*/ true,
                                      runtime->GetSystemThreadGroup(), /*create_peer=*/ false)) {
      LOG(ERROR) << "failed to attach thread.";
      return;
    }
    art::Thread* self = art::Thread::Current();
    if (!self) {
      LOG(FATAL_WITHOUT_ABORT) << "no thread in perfetto_hprof_listener";
      return;
    }
    {
      art::MutexLock lk(self, GetStateMutex());
      if (g_state == State::kWaitForListener) {
        g_state = State::kWaitForStart;
        GetStateCV().Broadcast(self);
      }
    }
    char buf[1];
    for (;;) {
      int res;
      do {
        res = read(g_signal_pipe_fds[0], buf, sizeof(buf));
      } while (res == -1 && errno == EINTR);

      if (res <= 0) {
        if (res == -1) {
          PLOG(ERROR) << "failed to read";
        }
        close(g_signal_pipe_fds[0]);
        return;
      }

      perfetto_hprof::DumpPerfetto(self);
    }
  });
  th.detach();

  art::MutexLock lk(art::Thread::Current(), GetStateMutex());
  while (g_state == State::kWaitForListener) {
    GetStateCV().Wait(art::Thread::Current());
  }
  return true;
}

extern "C" bool ArtPlugin_Deinitialize() {
  if (sigaction(kJavaHeapprofdSignal, &g_orig_act, nullptr) != 0) {
    PLOG(ERROR) << "failed to reset signal handler";
    // We cannot close the pipe if the signal handler wasn't unregistered,
    // to avoid receiving SIGPIPE.
    return false;
  }
  close(g_signal_pipe_fds[1]);

  art::Thread* self = art::Thread::Current();
  art::MutexLock lk(self, GetStateMutex());
  if (g_state != State::kWaitForListener) {
    g_state = State::kUninitialized;
    GetStateCV().Broadcast(self);
  }
  return true;
}

}  // namespace perfetto_hprof

namespace perfetto {

PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(perfetto_hprof::JavaHprofDataSource);

}