blob: a6a2475d4b1621a6da1ce55d28b5c1bb3f06f81d [file] [log] [blame]
Dave Allison0aded082013-11-07 13:15:11 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "profiler.h"
18
Calin Juravle9dae5b42014-04-07 16:36:21 +030019#include <fstream>
Dave Allison0aded082013-11-07 13:15:11 -080020#include <sys/uio.h>
Dave Allison39c3bfb2014-01-28 18:33:52 -080021#include <sys/file.h>
Dave Allison0aded082013-11-07 13:15:11 -080022
23#include "base/stl_util.h"
24#include "base/unix_file/fd_file.h"
25#include "class_linker.h"
26#include "common_throws.h"
27#include "debugger.h"
28#include "dex_file-inl.h"
29#include "instrumentation.h"
30#include "mirror/art_method-inl.h"
31#include "mirror/class-inl.h"
32#include "mirror/dex_cache.h"
33#include "mirror/object_array-inl.h"
34#include "mirror/object-inl.h"
Dave Allison0aded082013-11-07 13:15:11 -080035#include "os.h"
36#include "scoped_thread_state_change.h"
37#include "ScopedLocalRef.h"
38#include "thread.h"
39#include "thread_list.h"
Dave Allison4a7867b2014-01-30 17:44:12 -080040
41#ifdef HAVE_ANDROID_OS
42#include "cutils/properties.h"
43#endif
44
Dave Allison0aded082013-11-07 13:15:11 -080045#if !defined(ART_USE_PORTABLE_COMPILER)
46#include "entrypoints/quick/quick_entrypoints.h"
47#endif
48
namespace art {

// Static state shared between the public Start()/Stop()/Shutdown() API and the
// background sampling thread.  profiler_ and profiler_pthread_ are accessed
// under Locks::profiler_lock_ (see Start/Stop below); shutting_down_ is also
// read under that lock via ShuttingDown().
BackgroundMethodSamplingProfiler* BackgroundMethodSamplingProfiler::profiler_ = nullptr;
pthread_t BackgroundMethodSamplingProfiler::profiler_pthread_ = 0U;
volatile bool BackgroundMethodSamplingProfiler::shutting_down_ = false;
54
Dave Allison0aded082013-11-07 13:15:11 -080055// TODO: this profiler runs regardless of the state of the machine. Maybe we should use the
56// wakelock or something to modify the run characteristics. This can be done when we
57// have some performance data after it's been used for a while.
58
Wei Jin445220d2014-06-20 15:56:53 -070059// Walk through the method within depth of max_depth_ on the Java stack
60class BoundedStackVisitor : public StackVisitor {
61 public:
62 BoundedStackVisitor(std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack,
63 Thread* thread, uint32_t max_depth)
64 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
65 : StackVisitor(thread, NULL), stack_(stack), max_depth_(max_depth), depth_(0) {
66 }
67
68 bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
69 mirror::ArtMethod* m = GetMethod();
70 if (m->IsRuntimeMethod()) {
71 return true;
72 }
73 uint32_t dex_pc_ = GetDexPc();
74 stack_->push_back(std::make_pair(m, dex_pc_));
75 ++depth_;
76 if (depth_ < max_depth_) {
77 return true;
78 } else {
79 return false;
80 }
81 }
82
83 private:
84 std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack_;
85 const uint32_t max_depth_;
86 uint32_t depth_;
87};
Dave Allison0aded082013-11-07 13:15:11 -080088
89// This is called from either a thread list traversal or from a checkpoint. Regardless
90// of which caller, the mutator lock must be held.
91static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
92 BackgroundMethodSamplingProfiler* profiler =
93 reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
Wei Jin445220d2014-06-20 15:56:53 -070094 const ProfilerOptions profile_options = profiler->GetProfilerOptions();
95 switch (profile_options.GetProfileType()) {
96 case kProfilerMethod: {
97 mirror::ArtMethod* method = thread->GetCurrentMethod(nullptr);
98 if (false && method == nullptr) {
99 LOG(INFO) << "No current method available";
100 std::ostringstream os;
101 thread->Dump(os);
102 std::string data(os.str());
103 LOG(INFO) << data;
104 }
105 profiler->RecordMethod(method);
106 break;
107 }
108 case kProfilerBoundedStack: {
109 std::vector<InstructionLocation> stack;
110 uint32_t max_depth = profile_options.GetMaxStackDepth();
111 BoundedStackVisitor bounded_stack_visitor(&stack, thread, max_depth);
112 bounded_stack_visitor.WalkStack();
113 profiler->RecordStack(stack);
114 break;
115 }
116 default:
117 LOG(INFO) << "This profile type is not implemented.";
Dave Allison0aded082013-11-07 13:15:11 -0800118 }
Dave Allison0aded082013-11-07 13:15:11 -0800119}
120
Dave Allison0aded082013-11-07 13:15:11 -0800121// A closure that is called by the thread checkpoint code.
122class SampleCheckpoint : public Closure {
123 public:
124 explicit SampleCheckpoint(BackgroundMethodSamplingProfiler* const profiler) :
125 profiler_(profiler) {}
126
127 virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
128 Thread* self = Thread::Current();
129 if (thread == nullptr) {
130 LOG(ERROR) << "Checkpoint with nullptr thread";
131 return;
132 }
133
134 // Grab the mutator lock (shared access).
135 ScopedObjectAccess soa(self);
136
137 // Grab a sample.
138 GetSample(thread, this->profiler_);
139
140 // And finally tell the barrier that we're done.
141 this->profiler_->GetBarrier().Pass(self);
142 }
143
144 private:
145 BackgroundMethodSamplingProfiler* const profiler_;
146};
147
// Returns whether profiler shutdown has been requested.  Reads the flag
// under the profiler lock so the sampler thread sees Stop()'s write.
bool BackgroundMethodSamplingProfiler::ShuttingDown(Thread* self) {
  MutexLock mu(self, *Locks::profiler_lock_);
  return shutting_down_;
}
152
// Entry point for the background sampling thread.  Main loop: sleep for the
// (exponentially backed-off) period, then for the configured duration sample
// all runnable threads via checkpoints at the configured interval, then
// write the accumulated profile out.  Exits when ShuttingDown() is observed.
void* BackgroundMethodSamplingProfiler::RunProfilerThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  BackgroundMethodSamplingProfiler* profiler =
      reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);

  // Add a random delay for the first time run so that we don't hammer the CPU
  // with all profiles running at the same time.
  const int kRandomDelayMaxSecs = 30;
  const double kMaxBackoffSecs = 24*60*60;  // Max backoff time (one day).

  srand(MicroTime() * getpid());
  int startup_delay = rand() % kRandomDelayMaxSecs;  // random delay for startup.

  // Attach this native thread to the runtime so it can hold the mutator lock.
  CHECK(runtime->AttachCurrentThread("Profiler", true, runtime->GetSystemThreadGroup(),
                                      !runtime->IsCompiler()));

  Thread* self = Thread::Current();

  // Multiplier applied to the period; grows by the backoff coefficient after
  // every run so profiling gets progressively less frequent.
  double backoff = 1.0;
  while (true) {
    if (ShuttingDown(self)) {
      break;
    }

    {
      // wait until we need to run another profile
      uint64_t delay_secs = profiler->options_.GetPeriodS() * backoff;

      // Add a startup delay to prevent all the profiles running at once.
      delay_secs += startup_delay;

      // Immediate startup for benchmarking?
      if (profiler->options_.GetStartImmediately() && startup_delay > 0) {
        delay_secs = 0;
      }

      // Only apply the startup delay on the first iteration.
      startup_delay = 0;

      VLOG(profiler) << "Delaying profile start for " << delay_secs << " secs";
      MutexLock mu(self, profiler->wait_lock_);
      // May return early if Stop() signals the condition variable.
      profiler->period_condition_.TimedWait(self, delay_secs * 1000, 0);

      // Expand the backoff by its coefficient, but don't go beyond the max.
      backoff = std::min(backoff * profiler->options_.GetBackoffCoefficient(), kMaxBackoffSecs);
    }

    // Re-check: Stop() may have woken us from the wait above.
    if (ShuttingDown(self)) {
      break;
    }

    uint64_t start_us = MicroTime();
    uint64_t end_us = start_us + profiler->options_.GetDurationS() * UINT64_C(1000000);
    uint64_t now_us = start_us;

    VLOG(profiler) << "Starting profiling run now for "
                   << PrettyDuration((end_us - start_us) * 1000);

    SampleCheckpoint check_point(profiler);

    size_t valid_samples = 0;
    // Sampling loop: one checkpoint round per interval until the duration ends.
    while (now_us < end_us) {
      if (ShuttingDown(self)) {
        break;
      }

      usleep(profiler->options_.GetIntervalUs());  // Non-interruptible sleep.

      ThreadList* thread_list = runtime->GetThreadList();

      // Reset the barrier, then request the checkpoint on all runnable threads.
      profiler->profiler_barrier_->Init(self, 0);
      size_t barrier_count = thread_list->RunCheckpointOnRunnableThreads(&check_point);

      // All threads are suspended, nothing to do.
      if (barrier_count == 0) {
        now_us = MicroTime();
        continue;
      }

      valid_samples += barrier_count;

      ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);

      // Wait for the barrier to be crossed by all runnable threads. This wait
      // is done with a timeout so that we can detect problems with the checkpoint
      // running code. We should never see this.
      const uint32_t kWaitTimeoutMs = 10000;
      const uint32_t kWaitTimeoutUs = kWaitTimeoutMs * 1000;

      uint64_t waitstart_us = MicroTime();
      // Wait for all threads to pass the barrier.
      profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);
      uint64_t waitend_us = MicroTime();
      uint64_t waitdiff_us = waitend_us - waitstart_us;

      // We should never get a timeout. If we do, it suggests a problem with the checkpoint
      // code. Crash the process in this case.
      CHECK_LT(waitdiff_us, kWaitTimeoutUs);

      // Update the current time.
      now_us = MicroTime();
    }

    if (valid_samples > 0) {
      // After the profile has been taken, write it out.
      ScopedObjectAccess soa(self);  // Acquire the mutator lock.
      uint32_t size = profiler->WriteProfile();
      VLOG(profiler) << "Profile size: " << size;
    }
  }

  LOG(INFO) << "Profiler shutdown";
  runtime->DetachCurrentThread();
  return nullptr;
}
269
270// Write out the profile file if we are generating a profile.
271uint32_t BackgroundMethodSamplingProfiler::WriteProfile() {
Calin Juravlec1b643c2014-05-30 23:44:11 +0100272 std::string full_name = output_filename_;
Brian Carlstrom4d466a82014-05-08 19:05:29 -0700273 VLOG(profiler) << "Saving profile to " << full_name;
Dave Allison0aded082013-11-07 13:15:11 -0800274
Dave Allison39c3bfb2014-01-28 18:33:52 -0800275 int fd = open(full_name.c_str(), O_RDWR);
276 if (fd < 0) {
277 // Open failed.
278 LOG(ERROR) << "Failed to open profile file " << full_name;
Dave Allison0aded082013-11-07 13:15:11 -0800279 return 0;
280 }
Dave Allison39c3bfb2014-01-28 18:33:52 -0800281
282 // Lock the file for exclusive access. This will block if another process is using
283 // the file.
284 int err = flock(fd, LOCK_EX);
285 if (err < 0) {
286 LOG(ERROR) << "Failed to lock profile file " << full_name;
287 return 0;
288 }
289
290 // Read the previous profile.
Wei Jina93b0bb2014-06-09 16:19:15 -0700291 profile_table_.ReadPrevious(fd, options_.GetProfileType());
Dave Allison39c3bfb2014-01-28 18:33:52 -0800292
293 // Move back to the start of the file.
294 lseek(fd, 0, SEEK_SET);
295
296 // Format the profile output and write to the file.
Dave Allison0aded082013-11-07 13:15:11 -0800297 std::ostringstream os;
298 uint32_t num_methods = DumpProfile(os);
299 std::string data(os.str());
Dave Allison39c3bfb2014-01-28 18:33:52 -0800300 const char *p = data.c_str();
301 size_t length = data.length();
302 size_t full_length = length;
303 do {
304 int n = ::write(fd, p, length);
305 p += n;
306 length -= n;
307 } while (length > 0);
308
309 // Truncate the file to the new length.
310 ftruncate(fd, full_length);
311
312 // Now unlock the file, allowing another process in.
313 err = flock(fd, LOCK_UN);
314 if (err < 0) {
315 LOG(ERROR) << "Failed to unlock profile file " << full_name;
316 }
317
318 // Done, close the file.
319 ::close(fd);
320
321 // Clean the profile for the next time.
322 CleanProfile();
323
Dave Allison0aded082013-11-07 13:15:11 -0800324 return num_methods;
325}
326
Calin Juravlec1b643c2014-05-30 23:44:11 +0100327bool BackgroundMethodSamplingProfiler::Start(
328 const std::string& output_filename, const ProfilerOptions& options) {
329 if (!options.IsEnabled()) {
Calin Juravlec1b643c2014-05-30 23:44:11 +0100330 return false;
331 }
332
333 CHECK(!output_filename.empty());
334
Dave Allison0aded082013-11-07 13:15:11 -0800335 Thread* self = Thread::Current();
336 {
337 MutexLock mu(self, *Locks::profiler_lock_);
338 // Don't start two profiler threads.
339 if (profiler_ != nullptr) {
Calin Juravlec1b643c2014-05-30 23:44:11 +0100340 return true;
Dave Allison0aded082013-11-07 13:15:11 -0800341 }
342 }
343
Calin Juravlec1b643c2014-05-30 23:44:11 +0100344 LOG(INFO) << "Starting profiler using output file: " << output_filename
345 << " and options: " << options;
Dave Allison0aded082013-11-07 13:15:11 -0800346 {
347 MutexLock mu(self, *Locks::profiler_lock_);
Calin Juravlec1b643c2014-05-30 23:44:11 +0100348 profiler_ = new BackgroundMethodSamplingProfiler(output_filename, options);
Dave Allison0aded082013-11-07 13:15:11 -0800349
350 CHECK_PTHREAD_CALL(pthread_create, (&profiler_pthread_, nullptr, &RunProfilerThread,
351 reinterpret_cast<void*>(profiler_)),
352 "Profiler thread");
353 }
Calin Juravlec1b643c2014-05-30 23:44:11 +0100354 return true;
Dave Allison0aded082013-11-07 13:15:11 -0800355}
356
357
358
359void BackgroundMethodSamplingProfiler::Stop() {
360 BackgroundMethodSamplingProfiler* profiler = nullptr;
361 pthread_t profiler_pthread = 0U;
362 {
363 MutexLock trace_mu(Thread::Current(), *Locks::profiler_lock_);
Wei Jin6a586912014-05-21 16:07:40 -0700364 CHECK(!shutting_down_);
Dave Allison0aded082013-11-07 13:15:11 -0800365 profiler = profiler_;
366 shutting_down_ = true;
367 profiler_pthread = profiler_pthread_;
368 }
369
370 // Now wake up the sampler thread if it sleeping.
371 {
372 MutexLock profile_mu(Thread::Current(), profiler->wait_lock_);
373 profiler->period_condition_.Signal(Thread::Current());
374 }
375 // Wait for the sample thread to stop.
376 CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profiler thread shutdown");
377
378 {
379 MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
380 profiler_ = nullptr;
381 }
382 delete profiler;
383}
384
385
// Runtime shutdown hook; simply delegates to Stop().
void BackgroundMethodSamplingProfiler::Shutdown() {
  Stop();
}
389
// Construct the profiler.  The profile table shares wait_lock_, and the
// barrier starts at zero (it is re-initialized before every checkpoint
// round by the sampler thread).
BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler(
    const std::string& output_filename, const ProfilerOptions& options)
    : output_filename_(output_filename),
      options_(options),
      wait_lock_("Profile wait lock"),
      period_condition_("Profile condition", wait_lock_),
      profile_table_(wait_lock_),
      profiler_barrier_(new Barrier(0)) {
  // Populate the filtered_methods set.
  // This is empty right now, but to add a method, do this:
  //
  // filtered_methods_.insert("void java.lang.Object.wait(long, int)");
}
403
Wei Jin445220d2014-06-20 15:56:53 -0700404// Filter out methods the profiler doesn't want to record.
405// We require mutator lock since some statistics will be updated here.
406bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method) {
Dave Allison0aded082013-11-07 13:15:11 -0800407 if (method == nullptr) {
408 profile_table_.NullMethod();
409 // Don't record a nullptr method.
Wei Jin445220d2014-06-20 15:56:53 -0700410 return false;
Dave Allison0aded082013-11-07 13:15:11 -0800411 }
412
413 mirror::Class* cls = method->GetDeclaringClass();
414 if (cls != nullptr) {
415 if (cls->GetClassLoader() == nullptr) {
416 // Don't include things in the boot
417 profile_table_.BootMethod();
Wei Jin445220d2014-06-20 15:56:53 -0700418 return false;
Dave Allison0aded082013-11-07 13:15:11 -0800419 }
420 }
421
422 bool is_filtered = false;
423
Mathieu Chartierbfd9a432014-05-21 17:43:44 -0700424 if (strcmp(method->GetName(), "<clinit>") == 0) {
Dave Allison0aded082013-11-07 13:15:11 -0800425 // always filter out class init
426 is_filtered = true;
427 }
428
429 // Filter out methods by name if there are any.
430 if (!is_filtered && filtered_methods_.size() > 0) {
431 std::string method_full_name = PrettyMethod(method);
432
433 // Don't include specific filtered methods.
434 is_filtered = filtered_methods_.count(method_full_name) != 0;
435 }
Wei Jin445220d2014-06-20 15:56:53 -0700436 return !is_filtered;
437}
Dave Allison0aded082013-11-07 13:15:11 -0800438
Wei Jin445220d2014-06-20 15:56:53 -0700439// A method has been hit, record its invocation in the method map.
440// The mutator_lock must be held (shared) when this is called.
441void BackgroundMethodSamplingProfiler::RecordMethod(mirror::ArtMethod* method) {
Dave Allison0aded082013-11-07 13:15:11 -0800442 // Add to the profile table unless it is filtered out.
Wei Jin445220d2014-06-20 15:56:53 -0700443 if (ProcessMethod(method)) {
444 profile_table_.Put(method);
445 }
446}
447
448// Record the current bounded stack into sampling results.
449void BackgroundMethodSamplingProfiler::RecordStack(const std::vector<InstructionLocation>& stack) {
450 if (stack.size() == 0) {
451 return;
452 }
453 // Get the method on top of the stack. We use this method to perform filtering.
454 mirror::ArtMethod* method = stack.front().first;
455 if (ProcessMethod(method)) {
456 profile_table_.PutStack(stack);
Dave Allison0aded082013-11-07 13:15:11 -0800457 }
458}
459
// Clean out any recordings for the method traces.
// Called by WriteProfile() after the data has been flushed so the next
// sampling run starts from an empty table.
void BackgroundMethodSamplingProfiler::CleanProfile() {
  profile_table_.Clear();
}
464
// Format the profile table into 'os' for the configured profile type.
// Returns the number of methods written.
uint32_t BackgroundMethodSamplingProfiler::DumpProfile(std::ostream& os) {
  return profile_table_.Write(os, options_.GetProfileType());
}
468
469// Profile Table.
470// This holds a mapping of mirror::ArtMethod* to a count of how many times a sample
471// hit it at the top of the stack.
472ProfileSampleResults::ProfileSampleResults(Mutex& lock) : lock_(lock), num_samples_(0),
473 num_null_methods_(0),
474 num_boot_methods_(0) {
475 for (int i = 0; i < kHashSize; i++) {
476 table[i] = nullptr;
477 }
Wei Jin445220d2014-06-20 15:56:53 -0700478 method_context_table = nullptr;
479 stack_trie_root_ = nullptr;
Dave Allison0aded082013-11-07 13:15:11 -0800480}
481
// Destructor: Clear() frees the hash-bucket maps, the stack trie, the
// method-context table, and any previous-profile context maps.
ProfileSampleResults::~ProfileSampleResults() {
  Clear();
}
485
Calin Juravlebb0b53f2014-05-23 17:33:29 +0100486// Add a method to the profile table. If it's the first time the method
Dave Allison0aded082013-11-07 13:15:11 -0800487// has been seen, add it with count=1, otherwise increment the count.
488void ProfileSampleResults::Put(mirror::ArtMethod* method) {
Wei Jina93b0bb2014-06-09 16:19:15 -0700489 MutexLock mu(Thread::Current(), lock_);
Dave Allison0aded082013-11-07 13:15:11 -0800490 uint32_t index = Hash(method);
491 if (table[index] == nullptr) {
492 table[index] = new Map();
493 }
494 Map::iterator i = table[index]->find(method);
495 if (i == table[index]->end()) {
496 (*table[index])[method] = 1;
497 } else {
498 i->second++;
499 }
500 num_samples_++;
Wei Jina93b0bb2014-06-09 16:19:15 -0700501}
502
// Add a bounded stack to the profile table. Only the count of the method on
// top of the frame will be increased.
// The stack is inserted into a trie rooted at stack_trie_root_: walking the
// vector from the outermost frame (rbegin) to the innermost, each frame maps
// to a child node keyed by (method reference, dex pc).  The node reached at
// the end represents the full sampled stack and has its count incremented.
void ProfileSampleResults::PutStack(const std::vector<InstructionLocation>& stack) {
  MutexLock mu(Thread::Current(), lock_);
  ScopedObjectAccess soa(Thread::Current());
  if (stack_trie_root_ == nullptr) {
    // The root of the stack trie is a dummy node so that we don't have to maintain
    // a collection of tries.
    stack_trie_root_ = new StackTrieNode();
  }

  StackTrieNode* current = stack_trie_root_;
  if (stack.size() == 0) {
    // Empty stack: attribute the sample to the dummy root.
    current->IncreaseCount();
    return;
  }

  // Walk outermost -> innermost frame, descending (and extending) the trie.
  for (std::vector<InstructionLocation>::const_reverse_iterator iter = stack.rbegin();
       iter != stack.rend(); ++iter) {
    InstructionLocation inst_loc = *iter;
    mirror::ArtMethod* method = inst_loc.first;
    if (method == nullptr) {
      // skip null method
      continue;
    }
    uint32_t dex_pc = inst_loc.second;
    uint32_t method_idx = method->GetDexMethodIndex();
    const DexFile* dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    MethodReference method_ref(dex_file, method_idx);
    StackTrieNode* child = current->FindChild(method_ref, dex_pc);
    if (child != nullptr) {
      current = child;
    } else {
      // No existing child for this (method, pc): create one, recording the
      // method's code size (0 if it has no code item, e.g. native/abstract).
      uint32_t method_size = 0;
      const DexFile::CodeItem* codeitem = method->GetCodeItem();
      if (codeitem != nullptr) {
        method_size = codeitem->insns_size_in_code_units_;
      }
      StackTrieNode* new_node = new StackTrieNode(method_ref, dex_pc, method_size, current);
      current->AppendChild(new_node);
      current = new_node;
    }
  }

  // First time this exact stack has been seen: index its leaf node by the
  // innermost method so Write() can find all contexts for a method.
  if (current != stack_trie_root_ && current->GetCount() == 0) {
    // Insert into method_context table;
    if (method_context_table == nullptr) {
      method_context_table = new MethodContextMap();
    }
    MethodReference method = current->GetMethod();
    MethodContextMap::iterator i = method_context_table->find(method);
    if (i == method_context_table->end()) {
      TrieNodeSet* node_set = new TrieNodeSet();
      node_set->insert(current);
      (*method_context_table)[method] = node_set;
    } else {
      TrieNodeSet* node_set = i->second;
      node_set->insert(current);
    }
  }
  current->IncreaseCount();
  num_samples_++;
}
566
// Write the profile table to the output stream. Also merge with the previous profile.
// The first output line is "samples/null_methods/boot_methods"; each following
// line describes one method.  Entries matched in previous_ are merged and
// erased; whatever remains in previous_ is appended unchanged at the end.
// Returns the number of method lines written.
uint32_t ProfileSampleResults::Write(std::ostream& os, ProfileDataType type) {
  ScopedObjectAccess soa(Thread::Current());
  // Fold the counters read from the previous profile into this run's totals.
  num_samples_ += previous_num_samples_;
  num_null_methods_ += previous_num_null_methods_;
  num_boot_methods_ += previous_num_boot_methods_;

  VLOG(profiler) << "Profile: "
                 << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_;
  os << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_ << "\n";
  uint32_t num_methods = 0;
  if (type == kProfilerMethod) {
    // Method profile: one "name/count/size" line per method in the hash table.
    for (int i = 0 ; i < kHashSize; i++) {
      Map *map = table[i];
      if (map != nullptr) {
        for (const auto &meth_iter : *map) {
          mirror::ArtMethod *method = meth_iter.first;
          std::string method_name = PrettyMethod(method);

          const DexFile::CodeItem* codeitem = method->GetCodeItem();
          uint32_t method_size = 0;
          if (codeitem != nullptr) {
            method_size = codeitem->insns_size_in_code_units_;
          }
          uint32_t count = meth_iter.second;

          // Merge this profile entry with one from a previous run (if present). Also
          // remove the previous entry.
          PreviousProfile::iterator pi = previous_.find(method_name);
          if (pi != previous_.end()) {
            count += pi->second.count_;
            previous_.erase(pi);
          }
          os << StringPrintf("%s/%u/%u\n", method_name.c_str(), count, method_size);
          ++num_methods;
        }
      }
    }
  } else if (type == kProfilerBoundedStack) {
    // Stack profile: one line per innermost method, aggregating all of its
    // sampled calling contexts from the trie.
    if (method_context_table != nullptr) {
      for (const auto &method_iter : *method_context_table) {
        MethodReference method = method_iter.first;
        TrieNodeSet* node_set = method_iter.second;
        std::string method_name = PrettyMethod(method.dex_method_index, *(method.dex_file));
        uint32_t method_size = 0;
        uint32_t total_count = 0;
        PreviousContextMap new_context_map;
        for (const auto &trie_node_i : *node_set) {
          StackTrieNode* node = trie_node_i;
          method_size = node->GetMethodSize();
          uint32_t count = node->GetCount();
          uint32_t dexpc = node->GetDexPC();
          total_count += count;

          StackTrieNode* current = node->GetParent();
          // We go backward on the trie to retrieve context and dex_pc until the dummy root.
          // The format of the context is "method_1@pc_1@method_2@pc_2@..."
          std::vector<std::string> context_vector;
          while (current != nullptr && current->GetParent() != nullptr) {
            context_vector.push_back(StringPrintf("%s@%u",
                PrettyMethod(current->GetMethod().dex_method_index, *(current->GetMethod().dex_file)).c_str(),
                current->GetDexPC()));
            current = current->GetParent();
          }
          std::string context_sig = Join(context_vector, '@');
          new_context_map[std::make_pair(dexpc, context_sig)] = count;
        }

        // Merge the contexts read from a previous profile run, then drop the
        // previous entry (its context map is owned here and freed below).
        PreviousProfile::iterator pi = previous_.find(method_name);
        if (pi != previous_.end()) {
          total_count += pi->second.count_;
          PreviousContextMap* previous_context_map = pi->second.context_map_;
          if (previous_context_map != nullptr) {
            for (const auto &context_i : *previous_context_map) {
              uint32_t count = context_i.second;
              PreviousContextMap::iterator ci = new_context_map.find(context_i.first);
              if (ci == new_context_map.end()) {
                new_context_map[context_i.first] = count;
              } else {
                ci->second += count;
              }
            }
          }
          delete previous_context_map;
          previous_.erase(pi);
        }
        // We write out profile data with dex pc and context information in the following format:
        // "method/total_count/size/[pc_1:count_1:context_1#pc_2:count_2:context_2#...]".
        std::vector<std::string> context_count_vector;
        for (const auto &context_i : new_context_map) {
          context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
              context_i.second, context_i.first.second.c_str()));
        }
        os << StringPrintf("%s/%u/%u/[%s]\n", method_name.c_str(), total_count,
            method_size, Join(context_count_vector, '#').c_str());
        ++num_methods;
      }
    }
  }

  // Now we write out the remaining previous methods.
  for (const auto &pi : previous_) {
    if (type == kProfilerMethod) {
      os << StringPrintf("%s/%u/%u\n", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
    } else if (type == kProfilerBoundedStack) {
      os << StringPrintf("%s/%u/%u/[", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
      PreviousContextMap* previous_context_map = pi.second.context_map_;
      if (previous_context_map != nullptr) {
        std::vector<std::string> context_count_vector;
        for (const auto &context_i : *previous_context_map) {
          context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
              context_i.second, context_i.first.second.c_str()));
        }
        os << Join(context_count_vector, '#');
      }
      os << "]\n";
    }
    ++num_methods;
  }
  return num_methods;
}
688
689void ProfileSampleResults::Clear() {
690 num_samples_ = 0;
691 num_null_methods_ = 0;
692 num_boot_methods_ = 0;
693 for (int i = 0; i < kHashSize; i++) {
Wei Jina93b0bb2014-06-09 16:19:15 -0700694 delete table[i];
695 table[i] = nullptr;
Wei Jin445220d2014-06-20 15:56:53 -0700696 }
697 if (stack_trie_root_ != nullptr) {
698 stack_trie_root_->DeleteChildren();
699 delete stack_trie_root_;
700 stack_trie_root_ = nullptr;
701 if (method_context_table != nullptr) {
702 delete method_context_table;
703 method_context_table = nullptr;
Wei Jina93b0bb2014-06-09 16:19:15 -0700704 }
Wei Jina93b0bb2014-06-09 16:19:15 -0700705 }
706 for (auto &pi : previous_) {
Wei Jin445220d2014-06-20 15:56:53 -0700707 if (pi.second.context_map_ != nullptr) {
708 delete pi.second.context_map_;
709 pi.second.context_map_ = nullptr;
710 }
Dave Allison0aded082013-11-07 13:15:11 -0800711 }
Dave Allison39c3bfb2014-01-28 18:33:52 -0800712 previous_.clear();
Dave Allison0aded082013-11-07 13:15:11 -0800713}
714
// Map a method pointer to one of kHashSize buckets.  The low 3 bits are
// shifted out before the modulo — presumably because aligned ArtMethod
// pointers carry little entropy there (TODO: confirm alignment assumption).
uint32_t ProfileSampleResults::Hash(mirror::ArtMethod* method) {
  return (PointerToLowMemUInt32(method) >> 3) % kHashSize;
}
718
// Read a single line into the given string. Returns true if everything OK, false
// on EOF or error.  On a false return 'line' holds whatever was read before
// the EOF/error (possibly a partial final line).
static bool ReadProfileLine(int fd, std::string& line) {
  line.clear();
  for (;;) {
    char ch;
    // One byte at a time so we never consume past the newline.
    // TODO: could speed this up but is it worth it?
    if (read(fd, &ch, 1) != 1) {
      return false;  // EOF or read error.
    }
    if (ch == '\n') {
      return true;
    }
    line += ch;
  }
}
736
Wei Jina93b0bb2014-06-09 16:19:15 -0700737void ProfileSampleResults::ReadPrevious(int fd, ProfileDataType type) {
Dave Allison39c3bfb2014-01-28 18:33:52 -0800738 // Reset counters.
739 previous_num_samples_ = previous_num_null_methods_ = previous_num_boot_methods_ = 0;
740
741 std::string line;
742
743 // The first line contains summary information.
744 if (!ReadProfileLine(fd, line)) {
745 return;
746 }
747 std::vector<std::string> summary_info;
748 Split(line, '/', summary_info);
749 if (summary_info.size() != 3) {
750 // Bad summary info. It should be count/nullcount/bootcount
751 return;
752 }
Wei Jinf21f0a92014-06-27 17:44:18 -0700753 previous_num_samples_ = strtoul(summary_info[0].c_str(), nullptr, 10);
754 previous_num_null_methods_ = strtoul(summary_info[1].c_str(), nullptr, 10);
755 previous_num_boot_methods_ = strtoul(summary_info[2].c_str(), nullptr, 10);
Dave Allison39c3bfb2014-01-28 18:33:52 -0800756
Wei Jina93b0bb2014-06-09 16:19:15 -0700757 // Now read each line until the end of file. Each line consists of 3 or 4 fields separated by /
Dave Allison39c3bfb2014-01-28 18:33:52 -0800758 while (true) {
759 if (!ReadProfileLine(fd, line)) {
760 break;
761 }
762 std::vector<std::string> info;
763 Split(line, '/', info);
Wei Jina93b0bb2014-06-09 16:19:15 -0700764 if (info.size() != 3 && info.size() != 4) {
Dave Allison39c3bfb2014-01-28 18:33:52 -0800765 // Malformed.
766 break;
767 }
768 std::string methodname = info[0];
Wei Jinf21f0a92014-06-27 17:44:18 -0700769 uint32_t total_count = strtoul(info[1].c_str(), nullptr, 10);
770 uint32_t size = strtoul(info[2].c_str(), nullptr, 10);
Wei Jin445220d2014-06-20 15:56:53 -0700771 PreviousContextMap* context_map = nullptr;
772 if (type == kProfilerBoundedStack && info.size() == 4) {
773 context_map = new PreviousContextMap();
774 std::string context_counts_str = info[3].substr(1, info[3].size() - 2);
775 std::vector<std::string> context_count_pairs;
776 Split(context_counts_str, '#', context_count_pairs);
777 for (uint32_t i = 0; i < context_count_pairs.size(); ++i) {
778 std::vector<std::string> context_count;
779 Split(context_count_pairs[i], ':', context_count);
780 if (context_count.size() == 2) {
781 // Handles the situtation when the profile file doesn't contain context information.
Wei Jinf21f0a92014-06-27 17:44:18 -0700782 uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
783 uint32_t count = strtoul(context_count[1].c_str(), nullptr, 10);
Wei Jin445220d2014-06-20 15:56:53 -0700784 (*context_map)[std::make_pair(dexpc, "")] = count;
785 } else {
786 // Handles the situtation when the profile file contains context information.
Wei Jinf21f0a92014-06-27 17:44:18 -0700787 uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
788 uint32_t count = strtoul(context_count[1].c_str(), nullptr, 10);
Wei Jin445220d2014-06-20 15:56:53 -0700789 std::string context = context_count[2];
790 (*context_map)[std::make_pair(dexpc, context)] = count;
791 }
Wei Jina93b0bb2014-06-09 16:19:15 -0700792 }
793 }
Wei Jin445220d2014-06-20 15:56:53 -0700794 previous_[methodname] = PreviousValue(total_count, size, context_map);
Dave Allison39c3bfb2014-01-28 18:33:52 -0800795 }
796}
Dave Allison0aded082013-11-07 13:15:11 -0800797
Calin Juravlebb0b53f2014-05-23 17:33:29 +0100798bool ProfileFile::LoadFile(const std::string& fileName) {
Calin Juravle9dae5b42014-04-07 16:36:21 +0300799 LOG(VERBOSE) << "reading profile file " << fileName;
800 struct stat st;
801 int err = stat(fileName.c_str(), &st);
802 if (err == -1) {
803 LOG(VERBOSE) << "not found";
804 return false;
805 }
806 if (st.st_size == 0) {
Dave Allison644789f2014-04-10 13:06:10 -0700807 return false; // Empty profiles are invalid.
Calin Juravle9dae5b42014-04-07 16:36:21 +0300808 }
809 std::ifstream in(fileName.c_str());
810 if (!in) {
811 LOG(VERBOSE) << "profile file " << fileName << " exists but can't be opened";
812 LOG(VERBOSE) << "file owner: " << st.st_uid << ":" << st.st_gid;
813 LOG(VERBOSE) << "me: " << getuid() << ":" << getgid();
814 LOG(VERBOSE) << "file permissions: " << std::oct << st.st_mode;
815 LOG(VERBOSE) << "errno: " << errno;
816 return false;
817 }
818 // The first line contains summary information.
819 std::string line;
820 std::getline(in, line);
821 if (in.eof()) {
822 return false;
823 }
824 std::vector<std::string> summary_info;
825 Split(line, '/', summary_info);
826 if (summary_info.size() != 3) {
Calin Juravle19477a82014-06-06 12:24:21 +0100827 // Bad summary info. It should be total/null/boot.
Calin Juravle9dae5b42014-04-07 16:36:21 +0300828 return false;
829 }
Calin Juravle19477a82014-06-06 12:24:21 +0100830 // This is the number of hits in all profiled methods (without nullptr or boot methods)
Wei Jinf21f0a92014-06-27 17:44:18 -0700831 uint32_t total_count = strtoul(summary_info[0].c_str(), nullptr, 10);
Calin Juravle9dae5b42014-04-07 16:36:21 +0300832
833 // Now read each line until the end of file. Each line consists of 3 fields separated by '/'.
834 // Store the info in descending order given by the most used methods.
835 typedef std::set<std::pair<int, std::vector<std::string>>> ProfileSet;
836 ProfileSet countSet;
837 while (!in.eof()) {
838 std::getline(in, line);
839 if (in.eof()) {
840 break;
841 }
842 std::vector<std::string> info;
843 Split(line, '/', info);
Wei Jina93b0bb2014-06-09 16:19:15 -0700844 if (info.size() != 3 && info.size() != 4) {
Calin Juravle9dae5b42014-04-07 16:36:21 +0300845 // Malformed.
Calin Juravlebb0b53f2014-05-23 17:33:29 +0100846 return false;
Calin Juravle9dae5b42014-04-07 16:36:21 +0300847 }
848 int count = atoi(info[1].c_str());
849 countSet.insert(std::make_pair(-count, info));
850 }
851
852 uint32_t curTotalCount = 0;
853 ProfileSet::iterator end = countSet.end();
854 const ProfileData* prevData = nullptr;
855 for (ProfileSet::iterator it = countSet.begin(); it != end ; it++) {
856 const std::string& methodname = it->second[0];
857 uint32_t count = -it->first;
Wei Jinf21f0a92014-06-27 17:44:18 -0700858 uint32_t size = strtoul(it->second[2].c_str(), nullptr, 10);
Calin Juravle9dae5b42014-04-07 16:36:21 +0300859 double usedPercent = (count * 100.0) / total_count;
860
861 curTotalCount += count;
862 // Methods with the same count should be part of the same top K percentage bucket.
863 double topKPercentage = (prevData != nullptr) && (prevData->GetCount() == count)
864 ? prevData->GetTopKUsedPercentage()
865 : 100 * static_cast<double>(curTotalCount) / static_cast<double>(total_count);
866
867 // Add it to the profile map.
868 ProfileData curData = ProfileData(methodname, count, size, usedPercent, topKPercentage);
Calin Juravlebb0b53f2014-05-23 17:33:29 +0100869 profile_map_[methodname] = curData;
Calin Juravle9dae5b42014-04-07 16:36:21 +0300870 prevData = &curData;
871 }
872 return true;
873}
874
Calin Juravlebb0b53f2014-05-23 17:33:29 +0100875bool ProfileFile::GetProfileData(ProfileFile::ProfileData* data, const std::string& method_name) {
876 ProfileMap::iterator i = profile_map_.find(method_name);
877 if (i == profile_map_.end()) {
Calin Juravle9dae5b42014-04-07 16:36:21 +0300878 return false;
879 }
Calin Juravlebb0b53f2014-05-23 17:33:29 +0100880 *data = i->second;
881 return true;
882}
883
884bool ProfileFile::GetTopKSamples(std::set<std::string>& topKSamples, double topKPercentage) {
885 ProfileMap::iterator end = profile_map_.end();
886 for (ProfileMap::iterator it = profile_map_.begin(); it != end; it++) {
Calin Juravle9dae5b42014-04-07 16:36:21 +0300887 if (it->second.GetTopKUsedPercentage() < topKPercentage) {
888 topKSamples.insert(it->first);
889 }
890 }
891 return true;
892}
893
Wei Jin445220d2014-06-20 15:56:53 -0700894StackTrieNode* StackTrieNode::FindChild(MethodReference method, uint32_t dex_pc) {
895 if (children_.size() == 0) {
896 return nullptr;
897 }
898 // Create a dummy node for searching.
899 StackTrieNode* node = new StackTrieNode(method, dex_pc, 0, nullptr);
900 std::set<StackTrieNode*, StackTrieNodeComparator>::iterator i = children_.find(node);
901 delete node;
902 return (i == children_.end()) ? nullptr : *i;
903}
904
905void StackTrieNode::DeleteChildren() {
906 for (auto &child : children_) {
907 if (child != nullptr) {
908 child->DeleteChildren();
909 delete child;
910 }
911 }
912}
913
Calin Juravle9dae5b42014-04-07 16:36:21 +0300914} // namespace art