blob: cecd86f9c475990c2c329dfe03a02c07af072930 [file] [log] [blame]
Dave Allison0aded082013-11-07 13:15:11 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "profiler.h"
18
Calin Juravle9dae5b42014-04-07 16:36:21 +030019#include <fstream>
Dave Allison0aded082013-11-07 13:15:11 -080020#include <sys/uio.h>
Dave Allison39c3bfb2014-01-28 18:33:52 -080021#include <sys/file.h>
Dave Allison0aded082013-11-07 13:15:11 -080022
23#include "base/stl_util.h"
24#include "base/unix_file/fd_file.h"
25#include "class_linker.h"
26#include "common_throws.h"
27#include "debugger.h"
28#include "dex_file-inl.h"
29#include "instrumentation.h"
30#include "mirror/art_method-inl.h"
31#include "mirror/class-inl.h"
32#include "mirror/dex_cache.h"
33#include "mirror/object_array-inl.h"
34#include "mirror/object-inl.h"
35#include "object_utils.h"
36#include "os.h"
37#include "scoped_thread_state_change.h"
38#include "ScopedLocalRef.h"
39#include "thread.h"
40#include "thread_list.h"
Dave Allison4a7867b2014-01-30 17:44:12 -080041
42#ifdef HAVE_ANDROID_OS
43#include "cutils/properties.h"
44#endif
45
Dave Allison0aded082013-11-07 13:15:11 -080046#if !defined(ART_USE_PORTABLE_COMPILER)
47#include "entrypoints/quick/quick_entrypoints.h"
48#endif
49
50namespace art {
51
// Singleton profiler instance, the pthread handle of its sampling thread, and
// the shutdown-requested flag.  Start()/Stop() mutate all three under
// Locks::profiler_lock_; ShuttingDown() reads the flag under the same lock.
BackgroundMethodSamplingProfiler* BackgroundMethodSamplingProfiler::profiler_ = nullptr;
pthread_t BackgroundMethodSamplingProfiler::profiler_pthread_ = 0U;
volatile bool BackgroundMethodSamplingProfiler::shutting_down_ = false;
55
Dave Allison0aded082013-11-07 13:15:11 -080056// TODO: this profiler runs regardless of the state of the machine. Maybe we should use the
57// wakelock or something to modify the run characteristics. This can be done when we
58// have some performance data after it's been used for a while.
59
Wei Jin445220d2014-06-20 15:56:53 -070060// Walk through the method within depth of max_depth_ on the Java stack
61class BoundedStackVisitor : public StackVisitor {
62 public:
63 BoundedStackVisitor(std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack,
64 Thread* thread, uint32_t max_depth)
65 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
66 : StackVisitor(thread, NULL), stack_(stack), max_depth_(max_depth), depth_(0) {
67 }
68
69 bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
70 mirror::ArtMethod* m = GetMethod();
71 if (m->IsRuntimeMethod()) {
72 return true;
73 }
74 uint32_t dex_pc_ = GetDexPc();
75 stack_->push_back(std::make_pair(m, dex_pc_));
76 ++depth_;
77 if (depth_ < max_depth_) {
78 return true;
79 } else {
80 return false;
81 }
82 }
83
84 private:
85 std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack_;
86 const uint32_t max_depth_;
87 uint32_t depth_;
88};
Dave Allison0aded082013-11-07 13:15:11 -080089
90// This is called from either a thread list traversal or from a checkpoint. Regardless
91// of which caller, the mutator lock must be held.
92static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
93 BackgroundMethodSamplingProfiler* profiler =
94 reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
Wei Jin445220d2014-06-20 15:56:53 -070095 const ProfilerOptions profile_options = profiler->GetProfilerOptions();
96 switch (profile_options.GetProfileType()) {
97 case kProfilerMethod: {
98 mirror::ArtMethod* method = thread->GetCurrentMethod(nullptr);
99 if (false && method == nullptr) {
100 LOG(INFO) << "No current method available";
101 std::ostringstream os;
102 thread->Dump(os);
103 std::string data(os.str());
104 LOG(INFO) << data;
105 }
106 profiler->RecordMethod(method);
107 break;
108 }
109 case kProfilerBoundedStack: {
110 std::vector<InstructionLocation> stack;
111 uint32_t max_depth = profile_options.GetMaxStackDepth();
112 BoundedStackVisitor bounded_stack_visitor(&stack, thread, max_depth);
113 bounded_stack_visitor.WalkStack();
114 profiler->RecordStack(stack);
115 break;
116 }
117 default:
118 LOG(INFO) << "This profile type is not implemented.";
Dave Allison0aded082013-11-07 13:15:11 -0800119 }
Dave Allison0aded082013-11-07 13:15:11 -0800120}
121
Dave Allison0aded082013-11-07 13:15:11 -0800122// A closure that is called by the thread checkpoint code.
123class SampleCheckpoint : public Closure {
124 public:
125 explicit SampleCheckpoint(BackgroundMethodSamplingProfiler* const profiler) :
126 profiler_(profiler) {}
127
128 virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
129 Thread* self = Thread::Current();
130 if (thread == nullptr) {
131 LOG(ERROR) << "Checkpoint with nullptr thread";
132 return;
133 }
134
135 // Grab the mutator lock (shared access).
136 ScopedObjectAccess soa(self);
137
138 // Grab a sample.
139 GetSample(thread, this->profiler_);
140
141 // And finally tell the barrier that we're done.
142 this->profiler_->GetBarrier().Pass(self);
143 }
144
145 private:
146 BackgroundMethodSamplingProfiler* const profiler_;
147};
148
149bool BackgroundMethodSamplingProfiler::ShuttingDown(Thread* self) {
150 MutexLock mu(self, *Locks::profiler_lock_);
151 return shutting_down_;
152}
153
// Profiler thread entry point.  Alternates between a sleep phase (period with
// exponential backoff, interruptible via period_condition_) and a sampling
// run: every interval it checkpoints all runnable threads, each of which
// records one sample; when the run's duration elapses the profile is written
// out.  Exits when Stop() sets shutting_down_.  |arg| is the profiler itself.
void* BackgroundMethodSamplingProfiler::RunProfilerThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  BackgroundMethodSamplingProfiler* profiler =
      reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);

  // Add a random delay for the first time run so that we don't hammer the CPU
  // with all profiles running at the same time.
  const int kRandomDelayMaxSecs = 30;
  const double kMaxBackoffSecs = 24*60*60;  // Max backoff time (one day).

  srand(MicroTime() * getpid());
  int startup_delay = rand() % kRandomDelayMaxSecs;  // random delay for startup.


  CHECK(runtime->AttachCurrentThread("Profiler", true, runtime->GetSystemThreadGroup(),
                                     !runtime->IsCompiler()));

  Thread* self = Thread::Current();

  // Multiplier applied to the period; grows by the backoff coefficient after
  // every sleep so successive runs spread out over time.
  double backoff = 1.0;
  while (true) {
    if (ShuttingDown(self)) {
      break;
    }

    {
      // wait until we need to run another profile
      uint64_t delay_secs = profiler->options_.GetPeriodS() * backoff;

      // Add a startup delay to prevent all the profiles running at once.
      delay_secs += startup_delay;

      // Immediate startup for benchmarking?
      if (profiler->options_.GetStartImmediately() && startup_delay > 0) {
        delay_secs = 0;
      }

      // Startup delay only applies to the first iteration.
      startup_delay = 0;

      VLOG(profiler) << "Delaying profile start for " << delay_secs << " secs";
      MutexLock mu(self, profiler->wait_lock_);
      // May return early if Stop() signals the condition.
      profiler->period_condition_.TimedWait(self, delay_secs * 1000, 0);

      // Expand the backoff by its coefficient, but don't go beyond the max.
      backoff = std::min(backoff * profiler->options_.GetBackoffCoefficient(), kMaxBackoffSecs);
    }

    if (ShuttingDown(self)) {
      break;
    }


    uint64_t start_us = MicroTime();
    uint64_t end_us = start_us + profiler->options_.GetDurationS() * UINT64_C(1000000);
    uint64_t now_us = start_us;

    VLOG(profiler) << "Starting profiling run now for "
                   << PrettyDuration((end_us - start_us) * 1000);

    SampleCheckpoint check_point(profiler);

    // Number of thread samples actually collected during this run.
    size_t valid_samples = 0;
    while (now_us < end_us) {
      if (ShuttingDown(self)) {
        break;
      }

      usleep(profiler->options_.GetIntervalUs());    // Non-interruptible sleep.

      ThreadList* thread_list = runtime->GetThreadList();

      // Reset the barrier, then request a checkpoint on every runnable thread.
      profiler->profiler_barrier_->Init(self, 0);
      size_t barrier_count = thread_list->RunCheckpointOnRunnableThreads(&check_point);

      // All threads are suspended, nothing to do.
      if (barrier_count == 0) {
        now_us = MicroTime();
        continue;
      }

      valid_samples += barrier_count;

      ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);

      // Wait for the barrier to be crossed by all runnable threads.  This wait
      // is done with a timeout so that we can detect problems with the checkpoint
      // running code.  We should never see this.
      const uint32_t kWaitTimeoutMs = 10000;
      const uint32_t kWaitTimeoutUs = kWaitTimeoutMs * 1000;

      uint64_t waitstart_us = MicroTime();
      // Wait for all threads to pass the barrier.
      profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);
      uint64_t waitend_us = MicroTime();
      uint64_t waitdiff_us = waitend_us - waitstart_us;

      // We should never get a timeout. If we do, it suggests a problem with the
      // checkpoint code.  Crash the process in this case.
      CHECK_LT(waitdiff_us, kWaitTimeoutUs);

      // Update the current time.
      now_us = MicroTime();
    }

    if (valid_samples > 0) {
      // After the profile has been taken, write it out.
      ScopedObjectAccess soa(self);   // Acquire the mutator lock.
      uint32_t size = profiler->WriteProfile();
      VLOG(profiler) << "Profile size: " << size;
    }
  }

  LOG(INFO) << "Profiler shutdown";
  runtime->DetachCurrentThread();
  return nullptr;
}
270
271// Write out the profile file if we are generating a profile.
272uint32_t BackgroundMethodSamplingProfiler::WriteProfile() {
Calin Juravlec1b643c2014-05-30 23:44:11 +0100273 std::string full_name = output_filename_;
Brian Carlstrom4d466a82014-05-08 19:05:29 -0700274 VLOG(profiler) << "Saving profile to " << full_name;
Dave Allison0aded082013-11-07 13:15:11 -0800275
Dave Allison39c3bfb2014-01-28 18:33:52 -0800276 int fd = open(full_name.c_str(), O_RDWR);
277 if (fd < 0) {
278 // Open failed.
279 LOG(ERROR) << "Failed to open profile file " << full_name;
Dave Allison0aded082013-11-07 13:15:11 -0800280 return 0;
281 }
Dave Allison39c3bfb2014-01-28 18:33:52 -0800282
283 // Lock the file for exclusive access. This will block if another process is using
284 // the file.
285 int err = flock(fd, LOCK_EX);
286 if (err < 0) {
287 LOG(ERROR) << "Failed to lock profile file " << full_name;
288 return 0;
289 }
290
291 // Read the previous profile.
Wei Jina93b0bb2014-06-09 16:19:15 -0700292 profile_table_.ReadPrevious(fd, options_.GetProfileType());
Dave Allison39c3bfb2014-01-28 18:33:52 -0800293
294 // Move back to the start of the file.
295 lseek(fd, 0, SEEK_SET);
296
297 // Format the profile output and write to the file.
Dave Allison0aded082013-11-07 13:15:11 -0800298 std::ostringstream os;
299 uint32_t num_methods = DumpProfile(os);
300 std::string data(os.str());
Dave Allison39c3bfb2014-01-28 18:33:52 -0800301 const char *p = data.c_str();
302 size_t length = data.length();
303 size_t full_length = length;
304 do {
305 int n = ::write(fd, p, length);
306 p += n;
307 length -= n;
308 } while (length > 0);
309
310 // Truncate the file to the new length.
311 ftruncate(fd, full_length);
312
313 // Now unlock the file, allowing another process in.
314 err = flock(fd, LOCK_UN);
315 if (err < 0) {
316 LOG(ERROR) << "Failed to unlock profile file " << full_name;
317 }
318
319 // Done, close the file.
320 ::close(fd);
321
322 // Clean the profile for the next time.
323 CleanProfile();
324
Dave Allison0aded082013-11-07 13:15:11 -0800325 return num_methods;
326}
327
Calin Juravlec1b643c2014-05-30 23:44:11 +0100328bool BackgroundMethodSamplingProfiler::Start(
329 const std::string& output_filename, const ProfilerOptions& options) {
330 if (!options.IsEnabled()) {
331 LOG(INFO) << "Profiler disabled. To enable setprop dalvik.vm.profiler 1.";
332 return false;
333 }
334
335 CHECK(!output_filename.empty());
336
Dave Allison0aded082013-11-07 13:15:11 -0800337 Thread* self = Thread::Current();
338 {
339 MutexLock mu(self, *Locks::profiler_lock_);
340 // Don't start two profiler threads.
341 if (profiler_ != nullptr) {
Calin Juravlec1b643c2014-05-30 23:44:11 +0100342 return true;
Dave Allison0aded082013-11-07 13:15:11 -0800343 }
344 }
345
Calin Juravlec1b643c2014-05-30 23:44:11 +0100346 LOG(INFO) << "Starting profiler using output file: " << output_filename
347 << " and options: " << options;
Dave Allison0aded082013-11-07 13:15:11 -0800348 {
349 MutexLock mu(self, *Locks::profiler_lock_);
Calin Juravlec1b643c2014-05-30 23:44:11 +0100350 profiler_ = new BackgroundMethodSamplingProfiler(output_filename, options);
Dave Allison0aded082013-11-07 13:15:11 -0800351
352 CHECK_PTHREAD_CALL(pthread_create, (&profiler_pthread_, nullptr, &RunProfilerThread,
353 reinterpret_cast<void*>(profiler_)),
354 "Profiler thread");
355 }
Calin Juravlec1b643c2014-05-30 23:44:11 +0100356 return true;
Dave Allison0aded082013-11-07 13:15:11 -0800357}
358
359
360
361void BackgroundMethodSamplingProfiler::Stop() {
362 BackgroundMethodSamplingProfiler* profiler = nullptr;
363 pthread_t profiler_pthread = 0U;
364 {
365 MutexLock trace_mu(Thread::Current(), *Locks::profiler_lock_);
Wei Jin6a586912014-05-21 16:07:40 -0700366 CHECK(!shutting_down_);
Dave Allison0aded082013-11-07 13:15:11 -0800367 profiler = profiler_;
368 shutting_down_ = true;
369 profiler_pthread = profiler_pthread_;
370 }
371
372 // Now wake up the sampler thread if it sleeping.
373 {
374 MutexLock profile_mu(Thread::Current(), profiler->wait_lock_);
375 profiler->period_condition_.Signal(Thread::Current());
376 }
377 // Wait for the sample thread to stop.
378 CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profiler thread shutdown");
379
380 {
381 MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
382 profiler_ = nullptr;
383 }
384 delete profiler;
385}
386
387
// Runtime shutdown hook: stops the profiler thread and frees the profiler.
void BackgroundMethodSamplingProfiler::Shutdown() {
  Stop();
}
391
// Construct the profiler state; the sampling thread itself is created by
// Start().  |output_filename| is where WriteProfile() saves merged results;
// |options| supplies period, duration, interval, backoff and profile type.
BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler(
    const std::string& output_filename, const ProfilerOptions& options)
    : output_filename_(output_filename),
      options_(options),
      wait_lock_("Profile wait lock"),
      period_condition_("Profile condition", wait_lock_),
      profile_table_(wait_lock_),
      profiler_barrier_(new Barrier(0)) {
  // Populate the filtered_methods set.
  // This is empty right now, but to add a method, do this:
  //
  //   filtered_methods_.insert("void java.lang.Object.wait(long, int)");
}
405
Wei Jin445220d2014-06-20 15:56:53 -0700406// Filter out methods the profiler doesn't want to record.
407// We require mutator lock since some statistics will be updated here.
408bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method) {
Dave Allison0aded082013-11-07 13:15:11 -0800409 if (method == nullptr) {
410 profile_table_.NullMethod();
411 // Don't record a nullptr method.
Wei Jin445220d2014-06-20 15:56:53 -0700412 return false;
Dave Allison0aded082013-11-07 13:15:11 -0800413 }
414
415 mirror::Class* cls = method->GetDeclaringClass();
416 if (cls != nullptr) {
417 if (cls->GetClassLoader() == nullptr) {
418 // Don't include things in the boot
419 profile_table_.BootMethod();
Wei Jin445220d2014-06-20 15:56:53 -0700420 return false;
Dave Allison0aded082013-11-07 13:15:11 -0800421 }
422 }
423
424 bool is_filtered = false;
425
Mathieu Chartierbfd9a432014-05-21 17:43:44 -0700426 if (strcmp(method->GetName(), "<clinit>") == 0) {
Dave Allison0aded082013-11-07 13:15:11 -0800427 // always filter out class init
428 is_filtered = true;
429 }
430
431 // Filter out methods by name if there are any.
432 if (!is_filtered && filtered_methods_.size() > 0) {
433 std::string method_full_name = PrettyMethod(method);
434
435 // Don't include specific filtered methods.
436 is_filtered = filtered_methods_.count(method_full_name) != 0;
437 }
Wei Jin445220d2014-06-20 15:56:53 -0700438 return !is_filtered;
439}
Dave Allison0aded082013-11-07 13:15:11 -0800440
Wei Jin445220d2014-06-20 15:56:53 -0700441// A method has been hit, record its invocation in the method map.
442// The mutator_lock must be held (shared) when this is called.
443void BackgroundMethodSamplingProfiler::RecordMethod(mirror::ArtMethod* method) {
Dave Allison0aded082013-11-07 13:15:11 -0800444 // Add to the profile table unless it is filtered out.
Wei Jin445220d2014-06-20 15:56:53 -0700445 if (ProcessMethod(method)) {
446 profile_table_.Put(method);
447 }
448}
449
450// Record the current bounded stack into sampling results.
451void BackgroundMethodSamplingProfiler::RecordStack(const std::vector<InstructionLocation>& stack) {
452 if (stack.size() == 0) {
453 return;
454 }
455 // Get the method on top of the stack. We use this method to perform filtering.
456 mirror::ArtMethod* method = stack.front().first;
457 if (ProcessMethod(method)) {
458 profile_table_.PutStack(stack);
Dave Allison0aded082013-11-07 13:15:11 -0800459 }
460}
461
// Clean out any recordings for the method traces.  Called after a successful
// WriteProfile() so the next sampling run starts from an empty table.
void BackgroundMethodSamplingProfiler::CleanProfile() {
  profile_table_.Clear();
}
466
// Format the collected profile (merged with any previously-read data) into
// |os|.  Returns the number of methods written.
uint32_t BackgroundMethodSamplingProfiler::DumpProfile(std::ostream& os) {
  return profile_table_.Write(os, options_.GetProfileType());
}
470
471// Profile Table.
472// This holds a mapping of mirror::ArtMethod* to a count of how many times a sample
473// hit it at the top of the stack.
474ProfileSampleResults::ProfileSampleResults(Mutex& lock) : lock_(lock), num_samples_(0),
475 num_null_methods_(0),
476 num_boot_methods_(0) {
477 for (int i = 0; i < kHashSize; i++) {
478 table[i] = nullptr;
479 }
Wei Jin445220d2014-06-20 15:56:53 -0700480 method_context_table = nullptr;
481 stack_trie_root_ = nullptr;
Dave Allison0aded082013-11-07 13:15:11 -0800482}
483
// Releases all owned maps, trie nodes and merged previous-run data.
ProfileSampleResults::~ProfileSampleResults() {
  Clear();
}
487
Calin Juravlebb0b53f2014-05-23 17:33:29 +0100488// Add a method to the profile table. If it's the first time the method
Dave Allison0aded082013-11-07 13:15:11 -0800489// has been seen, add it with count=1, otherwise increment the count.
490void ProfileSampleResults::Put(mirror::ArtMethod* method) {
Wei Jina93b0bb2014-06-09 16:19:15 -0700491 MutexLock mu(Thread::Current(), lock_);
Dave Allison0aded082013-11-07 13:15:11 -0800492 uint32_t index = Hash(method);
493 if (table[index] == nullptr) {
494 table[index] = new Map();
495 }
496 Map::iterator i = table[index]->find(method);
497 if (i == table[index]->end()) {
498 (*table[index])[method] = 1;
499 } else {
500 i->second++;
501 }
502 num_samples_++;
Wei Jina93b0bb2014-06-09 16:19:15 -0700503}
504
// Add a bounded stack to the profile table. Only the count of the method on
// top of the frame will be increased.  The sample is stored as a path in a
// trie rooted at stack_trie_root_ (outermost caller first), and the leaf node
// reached is also indexed by method in method_context_table.
void ProfileSampleResults::PutStack(const std::vector<InstructionLocation>& stack) {
  MutexLock mu(Thread::Current(), lock_);
  ScopedObjectAccess soa(Thread::Current());
  if (stack_trie_root_ == nullptr) {
    // The root of the stack trie is a dummy node so that we don't have to
    // maintain a collection of tries.
    stack_trie_root_ = new StackTrieNode();
  }

  StackTrieNode* current = stack_trie_root_;
  if (stack.size() == 0) {
    // Empty sample: charge the dummy root.
    current->IncreaseCount();
    return;
  }

  // Walk the sample from the outermost caller to the sampled (top) frame,
  // descending the trie and creating child nodes as needed.
  for (std::vector<InstructionLocation>::const_reverse_iterator iter = stack.rbegin();
       iter != stack.rend(); ++iter) {
    InstructionLocation inst_loc = *iter;
    mirror::ArtMethod* method = inst_loc.first;
    if (method == nullptr) {
      // skip null method
      continue;
    }
    uint32_t dex_pc = inst_loc.second;
    uint32_t method_idx = method->GetDexMethodIndex();
    const DexFile* dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    MethodReference method_ref(dex_file, method_idx);
    StackTrieNode* child = current->FindChild(method_ref, dex_pc);
    if (child != nullptr) {
      current = child;
    } else {
      // First time this (method, pc) appears under this context: create a node,
      // caching the method's code size for later output.
      uint32_t method_size = 0;
      const DexFile::CodeItem* codeitem = method->GetCodeItem();
      if (codeitem != nullptr) {
        method_size = codeitem->insns_size_in_code_units_;
      }
      StackTrieNode* new_node = new StackTrieNode(method_ref, dex_pc, method_size, current);
      current->AppendChild(new_node);
      current = new_node;
    }
  }

  // If this exact leaf has never been counted before, index it by method in
  // method_context_table so Write() can enumerate all contexts per method.
  if (current != stack_trie_root_ && current->GetCount() == 0) {
    // Insert into method_context table.
    if (method_context_table == nullptr) {
      method_context_table = new MethodContextMap();
    }
    MethodReference method = current->GetMethod();
    MethodContextMap::iterator i = method_context_table->find(method);
    if (i == method_context_table->end()) {
      TrieNodeSet* node_set = new TrieNodeSet();
      node_set->insert(current);
      (*method_context_table)[method] = node_set;
    } else {
      TrieNodeSet* node_set = i->second;
      node_set->insert(current);
    }
  }
  current->IncreaseCount();
  num_samples_++;
}
568
// Write the profile table to the output stream. Also merge with the previous
// profile (already loaded into previous_ by ReadPrevious()).  Output format:
// first a "samples/null/boot" summary line, then one line per method —
// "name/count/size" for kProfilerMethod, or
// "name/total_count/size/[pc:count:context#...]" for kProfilerBoundedStack.
// Returns the number of method lines written.
uint32_t ProfileSampleResults::Write(std::ostream& os, ProfileDataType type) {
  ScopedObjectAccess soa(Thread::Current());
  // Fold the previous run's summary counters into this run's.
  num_samples_ += previous_num_samples_;
  num_null_methods_ += previous_num_null_methods_;
  num_boot_methods_ += previous_num_boot_methods_;

  VLOG(profiler) << "Profile: "
                 << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_;
  os << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_ << "\n";
  uint32_t num_methods = 0;
  if (type == kProfilerMethod) {
    // Flat method-count profile: walk every hash bucket.
    for (int i = 0 ; i < kHashSize; i++) {
      Map *map = table[i];
      if (map != nullptr) {
        for (const auto &meth_iter : *map) {
          mirror::ArtMethod *method = meth_iter.first;
          std::string method_name = PrettyMethod(method);

          const DexFile::CodeItem* codeitem = method->GetCodeItem();
          uint32_t method_size = 0;
          if (codeitem != nullptr) {
            method_size = codeitem->insns_size_in_code_units_;
          }
          uint32_t count = meth_iter.second;

          // Merge this profile entry with one from a previous run (if present).
          // Also remove the previous entry.
          PreviousProfile::iterator pi = previous_.find(method_name);
          if (pi != previous_.end()) {
            count += pi->second.count_;
            previous_.erase(pi);
          }
          os << StringPrintf("%s/%u/%u\n", method_name.c_str(), count, method_size);
          ++num_methods;
        }
      }
    }
  } else if (type == kProfilerBoundedStack) {
    // Stack profile: one output line per sampled method, aggregating every
    // (dex pc, calling context) leaf recorded for it in the trie.
    if (method_context_table != nullptr) {
      for (const auto &method_iter : *method_context_table) {
        MethodReference method = method_iter.first;
        TrieNodeSet* node_set = method_iter.second;
        std::string method_name = PrettyMethod(method.dex_method_index, *(method.dex_file));
        uint32_t method_size = 0;
        uint32_t total_count = 0;
        PreviousContextMap new_context_map;
        for (const auto &trie_node_i : *node_set) {
          StackTrieNode* node = trie_node_i;
          method_size = node->GetMethodSize();
          uint32_t count = node->GetCount();
          uint32_t dexpc = node->GetDexPC();
          total_count += count;

          StackTrieNode* current = node->GetParent();
          // We go backward on the trie to retrieve context and dex_pc until the
          // dummy root.  The format of the context is
          // "method_1@pc_1@method_2@pc_2@..."
          std::vector<std::string> context_vector;
          while (current != nullptr && current->GetParent() != nullptr) {
            context_vector.push_back(StringPrintf("%s@%u",
                PrettyMethod(current->GetMethod().dex_method_index,
                             *(current->GetMethod().dex_file)).c_str(),
                current->GetDexPC()));
            current = current->GetParent();
          }
          std::string context_sig = Join(context_vector, '@');
          new_context_map[std::make_pair(dexpc, context_sig)] = count;
        }

        // Merge counts from the previous run for this method, then drop the
        // previous entry (its context map is owned here and freed once merged).
        PreviousProfile::iterator pi = previous_.find(method_name);
        if (pi != previous_.end()) {
          total_count += pi->second.count_;
          PreviousContextMap* previous_context_map = pi->second.context_map_;
          if (previous_context_map != nullptr) {
            for (const auto &context_i : *previous_context_map) {
              uint32_t count = context_i.second;
              PreviousContextMap::iterator ci = new_context_map.find(context_i.first);
              if (ci == new_context_map.end()) {
                new_context_map[context_i.first] = count;
              } else {
                ci->second += count;
              }
            }
          }
          delete previous_context_map;
          previous_.erase(pi);
        }
        // We write out profile data with dex pc and context information in the
        // following format:
        // "method/total_count/size/[pc_1:count_1:context_1#pc_2:count_2:context_2#...]".
        std::vector<std::string> context_count_vector;
        for (const auto &context_i : new_context_map) {
          context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
              context_i.second, context_i.first.second.c_str()));
        }
        os << StringPrintf("%s/%u/%u/[%s]\n", method_name.c_str(), total_count,
            method_size, Join(context_count_vector, '#').c_str());
        ++num_methods;
      }
    }
  }

  // Now we write out the remaining previous methods (those not hit this run).
  for (const auto &pi : previous_) {
    if (type == kProfilerMethod) {
      os << StringPrintf("%s/%u/%u\n", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
    } else if (type == kProfilerBoundedStack) {
      os << StringPrintf("%s/%u/%u/[", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
      PreviousContextMap* previous_context_map = pi.second.context_map_;
      if (previous_context_map != nullptr) {
        std::vector<std::string> context_count_vector;
        for (const auto &context_i : *previous_context_map) {
          context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
              context_i.second, context_i.first.second.c_str()));
        }
        os << Join(context_count_vector, '#');
      }
      os << "]\n";
    }
    ++num_methods;
  }
  return num_methods;
}
690
691void ProfileSampleResults::Clear() {
692 num_samples_ = 0;
693 num_null_methods_ = 0;
694 num_boot_methods_ = 0;
695 for (int i = 0; i < kHashSize; i++) {
Wei Jina93b0bb2014-06-09 16:19:15 -0700696 delete table[i];
697 table[i] = nullptr;
Wei Jin445220d2014-06-20 15:56:53 -0700698 }
699 if (stack_trie_root_ != nullptr) {
700 stack_trie_root_->DeleteChildren();
701 delete stack_trie_root_;
702 stack_trie_root_ = nullptr;
703 if (method_context_table != nullptr) {
704 delete method_context_table;
705 method_context_table = nullptr;
Wei Jina93b0bb2014-06-09 16:19:15 -0700706 }
Wei Jina93b0bb2014-06-09 16:19:15 -0700707 }
708 for (auto &pi : previous_) {
Wei Jin445220d2014-06-20 15:56:53 -0700709 if (pi.second.context_map_ != nullptr) {
710 delete pi.second.context_map_;
711 pi.second.context_map_ = nullptr;
712 }
Dave Allison0aded082013-11-07 13:15:11 -0800713 }
Dave Allison39c3bfb2014-01-28 18:33:52 -0800714 previous_.clear();
Dave Allison0aded082013-11-07 13:15:11 -0800715}
716
// Map an ArtMethod pointer to a bucket index in [0, kHashSize).  The low
// 3 bits are shifted out first (presumably object-alignment bits that carry
// no information — TODO confirm against heap alignment).
uint32_t ProfileSampleResults::Hash(mirror::ArtMethod* method) {
  return (PointerToLowMemUInt32(method) >> 3) % kHashSize;
}
720
// Read a single line into the given string. Returns true if everything OK,
// false on EOF or error.  The terminating newline is consumed but not stored.
static bool ReadProfileLine(int fd, std::string& line) {
  line.clear();
  while (true) {
    char ch;
    ssize_t n = read(fd, &ch, 1);  // TODO: could speed this up but is it worth it?
    if (n != 1) {
      return false;  // EOF or read error.
    }
    if (ch == '\n') {
      return true;  // Line complete.
    }
    line += ch;
  }
}
738
Wei Jina93b0bb2014-06-09 16:19:15 -0700739void ProfileSampleResults::ReadPrevious(int fd, ProfileDataType type) {
Dave Allison39c3bfb2014-01-28 18:33:52 -0800740 // Reset counters.
741 previous_num_samples_ = previous_num_null_methods_ = previous_num_boot_methods_ = 0;
742
743 std::string line;
744
745 // The first line contains summary information.
746 if (!ReadProfileLine(fd, line)) {
747 return;
748 }
749 std::vector<std::string> summary_info;
750 Split(line, '/', summary_info);
751 if (summary_info.size() != 3) {
752 // Bad summary info. It should be count/nullcount/bootcount
753 return;
754 }
755 previous_num_samples_ = atoi(summary_info[0].c_str());
756 previous_num_null_methods_ = atoi(summary_info[1].c_str());
757 previous_num_boot_methods_ = atoi(summary_info[2].c_str());
758
Wei Jina93b0bb2014-06-09 16:19:15 -0700759 // Now read each line until the end of file. Each line consists of 3 or 4 fields separated by /
Dave Allison39c3bfb2014-01-28 18:33:52 -0800760 while (true) {
761 if (!ReadProfileLine(fd, line)) {
762 break;
763 }
764 std::vector<std::string> info;
765 Split(line, '/', info);
Wei Jina93b0bb2014-06-09 16:19:15 -0700766 if (info.size() != 3 && info.size() != 4) {
Dave Allison39c3bfb2014-01-28 18:33:52 -0800767 // Malformed.
768 break;
769 }
770 std::string methodname = info[0];
Wei Jina93b0bb2014-06-09 16:19:15 -0700771 uint32_t total_count = atoi(info[1].c_str());
Dave Allison39c3bfb2014-01-28 18:33:52 -0800772 uint32_t size = atoi(info[2].c_str());
Wei Jin445220d2014-06-20 15:56:53 -0700773 PreviousContextMap* context_map = nullptr;
774 if (type == kProfilerBoundedStack && info.size() == 4) {
775 context_map = new PreviousContextMap();
776 std::string context_counts_str = info[3].substr(1, info[3].size() - 2);
777 std::vector<std::string> context_count_pairs;
778 Split(context_counts_str, '#', context_count_pairs);
779 for (uint32_t i = 0; i < context_count_pairs.size(); ++i) {
780 std::vector<std::string> context_count;
781 Split(context_count_pairs[i], ':', context_count);
782 if (context_count.size() == 2) {
783 // Handles the situtation when the profile file doesn't contain context information.
784 uint32_t dexpc = atoi(context_count[0].c_str());
785 uint32_t count = atoi(context_count[1].c_str());
786 (*context_map)[std::make_pair(dexpc, "")] = count;
787 } else {
788 // Handles the situtation when the profile file contains context information.
789 uint32_t dexpc = atoi(context_count[0].c_str());
790 uint32_t count = atoi(context_count[1].c_str());
791 std::string context = context_count[2];
792 (*context_map)[std::make_pair(dexpc, context)] = count;
793 }
Wei Jina93b0bb2014-06-09 16:19:15 -0700794 }
795 }
Wei Jin445220d2014-06-20 15:56:53 -0700796 previous_[methodname] = PreviousValue(total_count, size, context_map);
Dave Allison39c3bfb2014-01-28 18:33:52 -0800797 }
798}
Dave Allison0aded082013-11-07 13:15:11 -0800799
Calin Juravlebb0b53f2014-05-23 17:33:29 +0100800bool ProfileFile::LoadFile(const std::string& fileName) {
Calin Juravle9dae5b42014-04-07 16:36:21 +0300801 LOG(VERBOSE) << "reading profile file " << fileName;
802 struct stat st;
803 int err = stat(fileName.c_str(), &st);
804 if (err == -1) {
805 LOG(VERBOSE) << "not found";
806 return false;
807 }
808 if (st.st_size == 0) {
Dave Allison644789f2014-04-10 13:06:10 -0700809 return false; // Empty profiles are invalid.
Calin Juravle9dae5b42014-04-07 16:36:21 +0300810 }
811 std::ifstream in(fileName.c_str());
812 if (!in) {
813 LOG(VERBOSE) << "profile file " << fileName << " exists but can't be opened";
814 LOG(VERBOSE) << "file owner: " << st.st_uid << ":" << st.st_gid;
815 LOG(VERBOSE) << "me: " << getuid() << ":" << getgid();
816 LOG(VERBOSE) << "file permissions: " << std::oct << st.st_mode;
817 LOG(VERBOSE) << "errno: " << errno;
818 return false;
819 }
820 // The first line contains summary information.
821 std::string line;
822 std::getline(in, line);
823 if (in.eof()) {
824 return false;
825 }
826 std::vector<std::string> summary_info;
827 Split(line, '/', summary_info);
828 if (summary_info.size() != 3) {
Calin Juravle19477a82014-06-06 12:24:21 +0100829 // Bad summary info. It should be total/null/boot.
Calin Juravle9dae5b42014-04-07 16:36:21 +0300830 return false;
831 }
Calin Juravle19477a82014-06-06 12:24:21 +0100832 // This is the number of hits in all profiled methods (without nullptr or boot methods)
833 uint32_t total_count = atoi(summary_info[0].c_str());
Calin Juravle9dae5b42014-04-07 16:36:21 +0300834
835 // Now read each line until the end of file. Each line consists of 3 fields separated by '/'.
836 // Store the info in descending order given by the most used methods.
837 typedef std::set<std::pair<int, std::vector<std::string>>> ProfileSet;
838 ProfileSet countSet;
839 while (!in.eof()) {
840 std::getline(in, line);
841 if (in.eof()) {
842 break;
843 }
844 std::vector<std::string> info;
845 Split(line, '/', info);
Wei Jina93b0bb2014-06-09 16:19:15 -0700846 if (info.size() != 3 && info.size() != 4) {
Calin Juravle9dae5b42014-04-07 16:36:21 +0300847 // Malformed.
Calin Juravlebb0b53f2014-05-23 17:33:29 +0100848 return false;
Calin Juravle9dae5b42014-04-07 16:36:21 +0300849 }
850 int count = atoi(info[1].c_str());
851 countSet.insert(std::make_pair(-count, info));
852 }
853
854 uint32_t curTotalCount = 0;
855 ProfileSet::iterator end = countSet.end();
856 const ProfileData* prevData = nullptr;
857 for (ProfileSet::iterator it = countSet.begin(); it != end ; it++) {
858 const std::string& methodname = it->second[0];
859 uint32_t count = -it->first;
860 uint32_t size = atoi(it->second[2].c_str());
861 double usedPercent = (count * 100.0) / total_count;
862
863 curTotalCount += count;
864 // Methods with the same count should be part of the same top K percentage bucket.
865 double topKPercentage = (prevData != nullptr) && (prevData->GetCount() == count)
866 ? prevData->GetTopKUsedPercentage()
867 : 100 * static_cast<double>(curTotalCount) / static_cast<double>(total_count);
868
869 // Add it to the profile map.
870 ProfileData curData = ProfileData(methodname, count, size, usedPercent, topKPercentage);
Calin Juravlebb0b53f2014-05-23 17:33:29 +0100871 profile_map_[methodname] = curData;
Calin Juravle9dae5b42014-04-07 16:36:21 +0300872 prevData = &curData;
873 }
874 return true;
875}
876
Calin Juravlebb0b53f2014-05-23 17:33:29 +0100877bool ProfileFile::GetProfileData(ProfileFile::ProfileData* data, const std::string& method_name) {
878 ProfileMap::iterator i = profile_map_.find(method_name);
879 if (i == profile_map_.end()) {
Calin Juravle9dae5b42014-04-07 16:36:21 +0300880 return false;
881 }
Calin Juravlebb0b53f2014-05-23 17:33:29 +0100882 *data = i->second;
883 return true;
884}
885
886bool ProfileFile::GetTopKSamples(std::set<std::string>& topKSamples, double topKPercentage) {
887 ProfileMap::iterator end = profile_map_.end();
888 for (ProfileMap::iterator it = profile_map_.begin(); it != end; it++) {
Calin Juravle9dae5b42014-04-07 16:36:21 +0300889 if (it->second.GetTopKUsedPercentage() < topKPercentage) {
890 topKSamples.insert(it->first);
891 }
892 }
893 return true;
894}
895
Wei Jin445220d2014-06-20 15:56:53 -0700896StackTrieNode* StackTrieNode::FindChild(MethodReference method, uint32_t dex_pc) {
897 if (children_.size() == 0) {
898 return nullptr;
899 }
900 // Create a dummy node for searching.
901 StackTrieNode* node = new StackTrieNode(method, dex_pc, 0, nullptr);
902 std::set<StackTrieNode*, StackTrieNodeComparator>::iterator i = children_.find(node);
903 delete node;
904 return (i == children_.end()) ? nullptr : *i;
905}
906
907void StackTrieNode::DeleteChildren() {
908 for (auto &child : children_) {
909 if (child != nullptr) {
910 child->DeleteChildren();
911 delete child;
912 }
913 }
914}
915
Calin Juravle9dae5b42014-04-07 16:36:21 +0300916} // namespace art