/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "profiler.h"

#include <sys/file.h>
#include <sys/stat.h>
#include <sys/uio.h>

#include <fstream>

#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "instrumentation.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "thread.h"
#include "thread_list.h"

#ifdef HAVE_ANDROID_OS
#include "cutils/properties.h"
#endif

#if !defined(ART_USE_PORTABLE_COMPILER)
#include "entrypoints/quick/quick_entrypoints.h"
#endif

namespace art {

BackgroundMethodSamplingProfiler* BackgroundMethodSamplingProfiler::profiler_ = nullptr;
pthread_t BackgroundMethodSamplingProfiler::profiler_pthread_ = 0U;
volatile bool BackgroundMethodSamplingProfiler::shutting_down_ = false;

// TODO: this profiler runs regardless of the state of the machine.  Maybe we should use the
// wakelock or something to modify the run characteristics.  This can be done when we
// have some performance data after it's been used for a while.

// Walk the Java stack and record each (method, dex pc) pair, up to a depth of max_depth_.
class BoundedStackVisitor : public StackVisitor {
 public:
  BoundedStackVisitor(std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack,
      Thread* thread, uint32_t max_depth)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, NULL), stack_(stack), max_depth_(max_depth), depth_(0) {
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }
    uint32_t dex_pc = GetDexPc();
    stack_->push_back(std::make_pair(m, dex_pc));
    ++depth_;
    if (depth_ < max_depth_) {
      return true;
    } else {
      return false;
    }
  }

 private:
  std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack_;
  const uint32_t max_depth_;
  uint32_t depth_;
};

// This is called from either a thread list traversal or from a checkpoint.  Regardless
// of which caller, the mutator lock must be held.
static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  BackgroundMethodSamplingProfiler* profiler =
      reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
  const ProfilerOptions profile_options = profiler->GetProfilerOptions();
  switch (profile_options.GetProfileType()) {
    case kProfilerMethod: {
      mirror::ArtMethod* method = thread->GetCurrentMethod(nullptr);
      if ((false) && method == nullptr) {
        LOG(INFO) << "No current method available";
        std::ostringstream os;
        thread->Dump(os);
        std::string data(os.str());
        LOG(INFO) << data;
      }
      profiler->RecordMethod(method);
      break;
    }
    case kProfilerBoundedStack: {
      std::vector<InstructionLocation> stack;
      uint32_t max_depth = profile_options.GetMaxStackDepth();
      BoundedStackVisitor bounded_stack_visitor(&stack, thread, max_depth);
      bounded_stack_visitor.WalkStack();
      profiler->RecordStack(stack);
      break;
    }
    default:
      LOG(INFO) << "This profile type is not implemented.";
  }
}

// A closure that is called by the thread checkpoint code.
class SampleCheckpoint FINAL : public Closure {
 public:
  explicit SampleCheckpoint(BackgroundMethodSamplingProfiler* const profiler) :
    profiler_(profiler) {}

  void Run(Thread* thread) OVERRIDE {
    Thread* self = Thread::Current();
    if (thread == nullptr) {
      LOG(ERROR) << "Checkpoint with nullptr thread";
      return;
    }

    // Grab the mutator lock (shared access).
    ScopedObjectAccess soa(self);

    // Grab a sample.
    GetSample(thread, this->profiler_);

    // And finally tell the barrier that we're done.
    this->profiler_->GetBarrier().Pass(self);
  }

 private:
  BackgroundMethodSamplingProfiler* const profiler_;
};

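// Returns true once Stop() has begun tearing the profiler down; read under profiler_lock_.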
bool BackgroundMethodSamplingProfiler::ShuttingDown(Thread* self) {
  MutexLock mu(self, *Locks::profiler_lock_);
  return shutting_down_;
}

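// Entry point of the background sampling thread: attach to the runtime, then loop running
// periodic sampling passes (with a randomized startup delay and exponential backoff between
// runs) until the profiler is shut down.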
void* BackgroundMethodSamplingProfiler::RunProfilerThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  BackgroundMethodSamplingProfiler* profiler =
      reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);

  // Add a random delay for the first run so that we don't hammer the CPU
  // with all profiles running at the same time.
  const int kRandomDelayMaxSecs = 30;
  const double kMaxBackoffSecs = 24*60*60;  // Max backoff time.

  srand(MicroTime() * getpid());
  int startup_delay = rand() % kRandomDelayMaxSecs;  // random delay for startup.


  CHECK(runtime->AttachCurrentThread("Profiler", true, runtime->GetSystemThreadGroup(),
                                     !runtime->IsCompiler()));

  Thread* self = Thread::Current();

  double backoff = 1.0;
  while (true) {
    if (ShuttingDown(self)) {
      break;
    }

    {
      // wait until we need to run another profile
      uint64_t delay_secs = profiler->options_.GetPeriodS() * backoff;

      // Add a startup delay to prevent all the profiles running at once.
      delay_secs += startup_delay;

      // Immediate startup for benchmarking?
      if (profiler->options_.GetStartImmediately() && startup_delay > 0) {
        delay_secs = 0;
      }

      startup_delay = 0;

      VLOG(profiler) << "Delaying profile start for " << delay_secs << " secs";
      MutexLock mu(self, profiler->wait_lock_);
      profiler->period_condition_.TimedWait(self, delay_secs * 1000, 0);
      // We were either signaled by Stop or timed out; in either case ignore the timed-out result.

      // Expand the backoff by its coefficient, but don't go beyond the max.
      backoff = std::min(backoff * profiler->options_.GetBackoffCoefficient(), kMaxBackoffSecs);
    }

    if (ShuttingDown(self)) {
      break;
    }


    uint64_t start_us = MicroTime();
    uint64_t end_us = start_us + profiler->options_.GetDurationS() * UINT64_C(1000000);
    uint64_t now_us = start_us;

    VLOG(profiler) << "Starting profiling run now for "
                   << PrettyDuration((end_us - start_us) * 1000);

    SampleCheckpoint check_point(profiler);

    size_t valid_samples = 0;
    while (now_us < end_us) {
      if (ShuttingDown(self)) {
        break;
      }

      usleep(profiler->options_.GetIntervalUs());    // Non-interruptible sleep.

      ThreadList* thread_list = runtime->GetThreadList();

      profiler->profiler_barrier_->Init(self, 0);
      size_t barrier_count = thread_list->RunCheckpointOnRunnableThreads(&check_point);

      // All threads are suspended, nothing to do.
      if (barrier_count == 0) {
        now_us = MicroTime();
        continue;
      }

      valid_samples += barrier_count;

      ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);

      // Wait for the barrier to be crossed by all runnable threads.  This wait
      // is done with a timeout so that we can detect problems with the checkpoint
      // running code.  We should never see this.
      const uint32_t kWaitTimeoutMs = 10000;

      // Wait for all threads to pass the barrier.
      bool timed_out = profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);

      // We should never get a timeout.  If we do, it suggests a problem with the checkpoint
      // code.  Crash the process in this case.
      CHECK(!timed_out);

      // Update the current time.
      now_us = MicroTime();
    }

    if (valid_samples > 0) {
      // After the profile has been taken, write it out.
      ScopedObjectAccess soa(self);   // Acquire the mutator lock.
      uint32_t size = profiler->WriteProfile();
      VLOG(profiler) << "Profile size: " << size;
    }
  }

  LOG(INFO) << "Profiler shutdown";
  runtime->DetachCurrentThread();
  return nullptr;
}

// Write out the profile file if we are generating a profile.
uint32_t BackgroundMethodSamplingProfiler::WriteProfile() {
  std::string full_name = output_filename_;
  VLOG(profiler) << "Saving profile to " << full_name;

  int fd = open(full_name.c_str(), O_RDWR);
  if (fd < 0) {
    // Open failed.
    LOG(ERROR) << "Failed to open profile file " << full_name;
    return 0;
  }

  // Lock the file for exclusive access.  This will block if another process is using
  // the file.
  int err = flock(fd, LOCK_EX);
  if (err < 0) {
    LOG(ERROR) << "Failed to lock profile file " << full_name;
    close(fd);
    return 0;
  }

  // Read the previous profile.
  profile_table_.ReadPrevious(fd, options_.GetProfileType());

  // Move back to the start of the file.
  lseek(fd, 0, SEEK_SET);

  // Format the profile output and write to the file.
  std::ostringstream os;
  uint32_t num_methods = DumpProfile(os);
  std::string data(os.str());
  const char *p = data.c_str();
  size_t length = data.length();
  size_t full_length = length;
  do {
    ssize_t n = ::write(fd, p, length);
    if (n < 0) {
      // Bail out on a write error instead of looping forever with a negative count.
      LOG(ERROR) << "Failed to write profile file " << full_name;
      break;
    }
    p += n;
    length -= n;
  } while (length > 0);

  // Truncate the file to the new length.
  ftruncate(fd, full_length);

  // Now unlock the file, allowing another process in.
  err = flock(fd, LOCK_UN);
  if (err < 0) {
    LOG(ERROR) << "Failed to unlock profile file " << full_name;
  }

  // Done, close the file.
  ::close(fd);

  // Clean the profile for the next time.
  CleanProfile();

  return num_methods;
}

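// Start the background profiler thread if profiling is enabled in the options and no
// profiler instance is already running.  Returns true if a profiler is (now) running.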
bool BackgroundMethodSamplingProfiler::Start(
    const std::string& output_filename, const ProfilerOptions& options) {
  if (!options.IsEnabled()) {
    return false;
  }

  CHECK(!output_filename.empty());

  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::profiler_lock_);
    // Don't start two profiler threads.
    if (profiler_ != nullptr) {
      return true;
    }
  }

  LOG(INFO) << "Starting profiler using output file: " << output_filename
            << " and options: " << options;
  {
    MutexLock mu(self, *Locks::profiler_lock_);
    profiler_ = new BackgroundMethodSamplingProfiler(output_filename, options);

    CHECK_PTHREAD_CALL(pthread_create, (&profiler_pthread_, nullptr, &RunProfilerThread,
                       reinterpret_cast<void*>(profiler_)),
                       "Profiler thread");
  }
  return true;
}

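// Signal the sampling thread to shut down, wait for it to exit, then delete the profiler
// instance.  Must not be called twice.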
void BackgroundMethodSamplingProfiler::Stop() {
  BackgroundMethodSamplingProfiler* profiler = nullptr;
  pthread_t profiler_pthread = 0U;
  {
    MutexLock trace_mu(Thread::Current(), *Locks::profiler_lock_);
    CHECK(!shutting_down_);
    profiler = profiler_;
    shutting_down_ = true;
    profiler_pthread = profiler_pthread_;
  }

  // Now wake up the sampler thread if it is sleeping.
  {
    MutexLock profile_mu(Thread::Current(), profiler->wait_lock_);
    profiler->period_condition_.Signal(Thread::Current());
  }
  // Wait for the sample thread to stop.
  CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profiler thread shutdown");

  {
    MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
    profiler_ = nullptr;
  }
  delete profiler;
}


void BackgroundMethodSamplingProfiler::Shutdown() {
  Stop();
}

BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler(
    const std::string& output_filename, const ProfilerOptions& options)
    : output_filename_(output_filename),
      options_(options),
      wait_lock_("Profile wait lock"),
      period_condition_("Profile condition", wait_lock_),
      profile_table_(wait_lock_),
      profiler_barrier_(new Barrier(0)) {
  // Populate the filtered_methods set.
  // This is empty right now, but to add a method, do this:
  //
  //   filtered_methods_.insert("void java.lang.Object.wait(long, int)");
}

// Filter out methods the profiler doesn't want to record.
// We require the mutator lock since some statistics will be updated here.
bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method) {
  if (method == nullptr) {
    profile_table_.NullMethod();
    // Don't record a nullptr method.
    return false;
  }

  mirror::Class* cls = method->GetDeclaringClass();
  if (cls != nullptr) {
    if (cls->GetClassLoader() == nullptr) {
      // Don't include methods loaded by the boot class loader.
      profile_table_.BootMethod();
      return false;
    }
  }

  bool is_filtered = false;

  if (strcmp(method->GetName(), "<clinit>") == 0) {
    // Always filter out class initializers.
    is_filtered = true;
  }

  // Filter out methods by name if there are any.
  if (!is_filtered && filtered_methods_.size() > 0) {
    std::string method_full_name = PrettyMethod(method);

    // Don't include specific filtered methods.
    is_filtered = filtered_methods_.count(method_full_name) != 0;
  }
  return !is_filtered;
}

// A method has been hit, record its invocation in the method map.
// The mutator_lock must be held (shared) when this is called.
void BackgroundMethodSamplingProfiler::RecordMethod(mirror::ArtMethod* method) {
  // Add to the profile table unless it is filtered out.
  if (ProcessMethod(method)) {
    profile_table_.Put(method);
  }
}

// Record the current bounded stack into sampling results.
void BackgroundMethodSamplingProfiler::RecordStack(const std::vector<InstructionLocation>& stack) {
  if (stack.size() == 0) {
    return;
  }
  // Get the method on top of the stack.  We use this method to perform filtering.
  mirror::ArtMethod* method = stack.front().first;
  if (ProcessMethod(method)) {
    profile_table_.PutStack(stack);
  }
}

// Clean out any recordings for the method traces.
void BackgroundMethodSamplingProfiler::CleanProfile() {
  profile_table_.Clear();
}

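// Serialize the accumulated profile data to the given stream; returns the number of methods
// written.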
uint32_t BackgroundMethodSamplingProfiler::DumpProfile(std::ostream& os) {
  return profile_table_.Write(os, options_.GetProfileType());
}

// Profile Table.
// This holds a mapping of mirror::ArtMethod* to a count of how many times a sample
// hit it at the top of the stack.
ProfileSampleResults::ProfileSampleResults(Mutex& lock) : lock_(lock), num_samples_(0),
                                                          num_null_methods_(0),
                                                          num_boot_methods_(0) {
  for (int i = 0; i < kHashSize; i++) {
    table[i] = nullptr;
  }
  method_context_table = nullptr;
  stack_trie_root_ = nullptr;
}

ProfileSampleResults::~ProfileSampleResults() {
  Clear();
}

// Add a method to the profile table.  If it's the first time the method
// has been seen, add it with count=1, otherwise increment the count.
void ProfileSampleResults::Put(mirror::ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  uint32_t index = Hash(method);
  if (table[index] == nullptr) {
    table[index] = new Map();
  }
  Map::iterator i = table[index]->find(method);
  if (i == table[index]->end()) {
    (*table[index])[method] = 1;
  } else {
    i->second++;
  }
  num_samples_++;
}

// Add a bounded stack to the profile table.  Only the count of the method on
// top of the frame will be increased.
void ProfileSampleResults::PutStack(const std::vector<InstructionLocation>& stack) {
  MutexLock mu(Thread::Current(), lock_);
  ScopedObjectAccess soa(Thread::Current());
  if (stack_trie_root_ == nullptr) {
    // The root of the stack trie is a dummy node so that we don't have to maintain
    // a collection of tries.
    stack_trie_root_ = new StackTrieNode();
  }

  StackTrieNode* current = stack_trie_root_;
  if (stack.size() == 0) {
    current->IncreaseCount();
    return;
  }

  for (std::vector<InstructionLocation>::const_reverse_iterator iter = stack.rbegin();
       iter != stack.rend(); ++iter) {
    InstructionLocation inst_loc = *iter;
    mirror::ArtMethod* method = inst_loc.first;
    if (method == nullptr) {
      // skip null method
      continue;
    }
    uint32_t dex_pc = inst_loc.second;
    uint32_t method_idx = method->GetDexMethodIndex();
    const DexFile* dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    MethodReference method_ref(dex_file, method_idx);
    StackTrieNode* child = current->FindChild(method_ref, dex_pc);
    if (child != nullptr) {
      current = child;
    } else {
      uint32_t method_size = 0;
      const DexFile::CodeItem* codeitem = method->GetCodeItem();
      if (codeitem != nullptr) {
        method_size = codeitem->insns_size_in_code_units_;
      }
      StackTrieNode* new_node = new StackTrieNode(method_ref, dex_pc, method_size, current);
      current->AppendChild(new_node);
      current = new_node;
    }
  }

  if (current != stack_trie_root_ && current->GetCount() == 0) {
    // Insert into method_context table;
    if (method_context_table == nullptr) {
      method_context_table = new MethodContextMap();
    }
    MethodReference method = current->GetMethod();
    MethodContextMap::iterator i = method_context_table->find(method);
    if (i == method_context_table->end()) {
      TrieNodeSet* node_set = new TrieNodeSet();
      node_set->insert(current);
      (*method_context_table)[method] = node_set;
    } else {
      TrieNodeSet* node_set = i->second;
      node_set->insert(current);
    }
  }
  current->IncreaseCount();
  num_samples_++;
}

// Write the profile table to the output stream.  Also merge with the previous profile.
uint32_t ProfileSampleResults::Write(std::ostream& os, ProfileDataType type) {
  ScopedObjectAccess soa(Thread::Current());
  num_samples_ += previous_num_samples_;
  num_null_methods_ += previous_num_null_methods_;
  num_boot_methods_ += previous_num_boot_methods_;

  VLOG(profiler) << "Profile: "
                 << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_;
  os << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_ << "\n";
  uint32_t num_methods = 0;
  if (type == kProfilerMethod) {
    for (int i = 0; i < kHashSize; i++) {
      Map *map = table[i];
      if (map != nullptr) {
        for (const auto &meth_iter : *map) {
          mirror::ArtMethod *method = meth_iter.first;
          std::string method_name = PrettyMethod(method);

          const DexFile::CodeItem* codeitem = method->GetCodeItem();
          uint32_t method_size = 0;
          if (codeitem != nullptr) {
            method_size = codeitem->insns_size_in_code_units_;
          }
          uint32_t count = meth_iter.second;

          // Merge this profile entry with one from a previous run (if present).  Also
          // remove the previous entry.
          PreviousProfile::iterator pi = previous_.find(method_name);
          if (pi != previous_.end()) {
            count += pi->second.count_;
            previous_.erase(pi);
          }
          os << StringPrintf("%s/%u/%u\n", method_name.c_str(), count, method_size);
          ++num_methods;
        }
      }
    }
  } else if (type == kProfilerBoundedStack) {
    if (method_context_table != nullptr) {
      for (const auto &method_iter : *method_context_table) {
        MethodReference method = method_iter.first;
        TrieNodeSet* node_set = method_iter.second;
        std::string method_name = PrettyMethod(method.dex_method_index, *(method.dex_file));
        uint32_t method_size = 0;
        uint32_t total_count = 0;
        PreviousContextMap new_context_map;
        for (const auto &trie_node_i : *node_set) {
          StackTrieNode* node = trie_node_i;
          method_size = node->GetMethodSize();
          uint32_t count = node->GetCount();
          uint32_t dexpc = node->GetDexPC();
          total_count += count;

          StackTrieNode* current = node->GetParent();
          // We go backward on the trie to retrieve context and dex_pc until the dummy root.
          // The format of the context is "method_1@pc_1@method_2@pc_2@..."
          std::vector<std::string> context_vector;
          while (current != nullptr && current->GetParent() != nullptr) {
            context_vector.push_back(StringPrintf("%s@%u",
                PrettyMethod(current->GetMethod().dex_method_index, *(current->GetMethod().dex_file)).c_str(),
                current->GetDexPC()));
            current = current->GetParent();
          }
          std::string context_sig = Join(context_vector, '@');
          new_context_map[std::make_pair(dexpc, context_sig)] = count;
        }

        PreviousProfile::iterator pi = previous_.find(method_name);
        if (pi != previous_.end()) {
          total_count += pi->second.count_;
          PreviousContextMap* previous_context_map = pi->second.context_map_;
          if (previous_context_map != nullptr) {
            for (const auto &context_i : *previous_context_map) {
              uint32_t count = context_i.second;
              PreviousContextMap::iterator ci = new_context_map.find(context_i.first);
              if (ci == new_context_map.end()) {
                new_context_map[context_i.first] = count;
              } else {
                ci->second += count;
              }
            }
          }
          delete previous_context_map;
          previous_.erase(pi);
        }
        // We write out profile data with dex pc and context information in the following format:
        // "method/total_count/size/[pc_1:count_1:context_1#pc_2:count_2:context_2#...]".
        std::vector<std::string> context_count_vector;
        for (const auto &context_i : new_context_map) {
          context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
              context_i.second, context_i.first.second.c_str()));
        }
        os << StringPrintf("%s/%u/%u/[%s]\n", method_name.c_str(), total_count,
            method_size, Join(context_count_vector, '#').c_str());
        ++num_methods;
      }
    }
  }

  // Now we write out the remaining previous methods.
  for (const auto &pi : previous_) {
    if (type == kProfilerMethod) {
      os << StringPrintf("%s/%u/%u\n", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
    } else if (type == kProfilerBoundedStack) {
      os << StringPrintf("%s/%u/%u/[", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
      PreviousContextMap* previous_context_map = pi.second.context_map_;
      if (previous_context_map != nullptr) {
        std::vector<std::string> context_count_vector;
        for (const auto &context_i : *previous_context_map) {
          context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
              context_i.second, context_i.first.second.c_str()));
        }
        os << Join(context_count_vector, '#');
      }
      os << "]\n";
    }
    ++num_methods;
  }
  return num_methods;
}

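// Release all accumulated sample data: the per-bucket method maps, the stack trie, the
// method context table, and any merged data from a previous profile.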
void ProfileSampleResults::Clear() {
  num_samples_ = 0;
  num_null_methods_ = 0;
  num_boot_methods_ = 0;
  for (int i = 0; i < kHashSize; i++) {
    delete table[i];
    table[i] = nullptr;
  }
  if (stack_trie_root_ != nullptr) {
    stack_trie_root_->DeleteChildren();
    delete stack_trie_root_;
    stack_trie_root_ = nullptr;
    if (method_context_table != nullptr) {
      delete method_context_table;
      method_context_table = nullptr;
    }
  }
  for (auto &pi : previous_) {
    if (pi.second.context_map_ != nullptr) {
      delete pi.second.context_map_;
      pi.second.context_map_ = nullptr;
    }
  }
  previous_.clear();
}

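// Hash an ArtMethod pointer into a bucket index.  The low bits carry little entropy because
// methods are aligned in memory, so they are shifted out before taking the modulus.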
uint32_t ProfileSampleResults::Hash(mirror::ArtMethod* method) {
  return (PointerToLowMemUInt32(method) >> 3) % kHashSize;
}

// Read a single line into the given string.  Returns true if everything OK, false
// on EOF or error.
static bool ReadProfileLine(int fd, std::string& line) {
  char buf[4];
  line.clear();
  while (true) {
    int n = read(fd, buf, 1);     // TODO: could speed this up but is it worth it?
    if (n != 1) {
      return false;
    }
    if (buf[0] == '\n') {
      break;
    }
    line += buf[0];
  }
  return true;
}

void ProfileSampleResults::ReadPrevious(int fd, ProfileDataType type) {
  // Reset counters.
  previous_num_samples_ = previous_num_null_methods_ = previous_num_boot_methods_ = 0;

  std::string line;

  // The first line contains summary information.
  if (!ReadProfileLine(fd, line)) {
    return;
  }
  std::vector<std::string> summary_info;
  Split(line, '/', &summary_info);
  if (summary_info.size() != 3) {
    // Bad summary info.  It should be count/nullcount/bootcount.
    return;
  }
  previous_num_samples_ = strtoul(summary_info[0].c_str(), nullptr, 10);
  previous_num_null_methods_ = strtoul(summary_info[1].c_str(), nullptr, 10);
  previous_num_boot_methods_ = strtoul(summary_info[2].c_str(), nullptr, 10);

  // Now read each line until the end of file.  Each line consists of 3 or 4 fields separated by /.
  while (true) {
    if (!ReadProfileLine(fd, line)) {
      break;
    }
    std::vector<std::string> info;
    Split(line, '/', &info);
    if (info.size() != 3 && info.size() != 4) {
      // Malformed.
      break;
    }
    std::string methodname = info[0];
    uint32_t total_count = strtoul(info[1].c_str(), nullptr, 10);
    uint32_t size = strtoul(info[2].c_str(), nullptr, 10);
    PreviousContextMap* context_map = nullptr;
    if (type == kProfilerBoundedStack && info.size() == 4) {
      context_map = new PreviousContextMap();
      std::string context_counts_str = info[3].substr(1, info[3].size() - 2);
      std::vector<std::string> context_count_pairs;
      Split(context_counts_str, '#', &context_count_pairs);
      for (uint32_t i = 0; i < context_count_pairs.size(); ++i) {
        std::vector<std::string> context_count;
        Split(context_count_pairs[i], ':', &context_count);
        if (context_count.size() == 2) {
          // Handles the situation when the profile file doesn't contain context information.
          uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
          uint32_t count = strtoul(context_count[1].c_str(), nullptr, 10);
          (*context_map)[std::make_pair(dexpc, "")] = count;
        } else {
          // Handles the situation when the profile file contains context information.
          uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
          uint32_t count = strtoul(context_count[1].c_str(), nullptr, 10);
          std::string context = context_count[2];
          (*context_map)[std::make_pair(dexpc, context)] = count;
        }
      }
    }
    previous_[methodname] = PreviousValue(total_count, size, context_map);
  }
}

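// Parse a profile file previously written by the profiler into profile_map_, computing each
// method's usage percentage and top-K bucket along the way.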
bool ProfileFile::LoadFile(const std::string& fileName) {
  LOG(VERBOSE) << "reading profile file " << fileName;
  struct stat st;
  int err = stat(fileName.c_str(), &st);
  if (err == -1) {
    LOG(VERBOSE) << "not found";
    return false;
  }
  if (st.st_size == 0) {
    return false;  // Empty profiles are invalid.
  }
  std::ifstream in(fileName.c_str());
  if (!in) {
    LOG(VERBOSE) << "profile file " << fileName << " exists but can't be opened";
    LOG(VERBOSE) << "file owner: " << st.st_uid << ":" << st.st_gid;
    LOG(VERBOSE) << "me: " << getuid() << ":" << getgid();
    LOG(VERBOSE) << "file permissions: " << std::oct << st.st_mode;
    LOG(VERBOSE) << "errno: " << errno;
    return false;
  }
  // The first line contains summary information.
  std::string line;
  std::getline(in, line);
  if (in.eof()) {
    return false;
  }
  std::vector<std::string> summary_info;
  Split(line, '/', &summary_info);
  if (summary_info.size() != 3) {
    // Bad summary info.  It should be total/null/boot.
    return false;
  }
  // This is the number of hits in all profiled methods (without nullptr or boot methods).
  uint32_t total_count = strtoul(summary_info[0].c_str(), nullptr, 10);

  // Now read each line until the end of file.  Each line consists of 3 fields separated by '/'.
  // Store the info in descending order given by the most used methods.
  typedef std::set<std::pair<int, std::vector<std::string>>> ProfileSet;
  ProfileSet countSet;
  while (!in.eof()) {
    std::getline(in, line);
    if (in.eof()) {
      break;
    }
    std::vector<std::string> info;
    Split(line, '/', &info);
    if (info.size() != 3 && info.size() != 4) {
      // Malformed.
      return false;
    }
    int count = atoi(info[1].c_str());
    countSet.insert(std::make_pair(-count, info));
  }

  uint32_t curTotalCount = 0;
  ProfileSet::iterator end = countSet.end();
  const ProfileData* prevData = nullptr;
  for (ProfileSet::iterator it = countSet.begin(); it != end; it++) {
    const std::string& methodname = it->second[0];
    uint32_t count = -it->first;
    uint32_t size = strtoul(it->second[2].c_str(), nullptr, 10);
    double usedPercent = (count * 100.0) / total_count;

    curTotalCount += count;
    // Methods with the same count should be part of the same top K percentage bucket.
    double topKPercentage = (prevData != nullptr) && (prevData->GetCount() == count)
      ? prevData->GetTopKUsedPercentage()
      : 100 * static_cast<double>(curTotalCount) / static_cast<double>(total_count);

    // Add it to the profile map.
    ProfileData curData = ProfileData(methodname, count, size, usedPercent, topKPercentage);
    profile_map_[methodname] = curData;
    // Point at the copy stored in the map, not the loop-local object, so the pointer stays
    // valid on the next iteration.
    prevData = &profile_map_[methodname];
  }
  return true;
}

bool ProfileFile::GetProfileData(ProfileFile::ProfileData* data, const std::string& method_name) {
  ProfileMap::iterator i = profile_map_.find(method_name);
  if (i == profile_map_.end()) {
    return false;
  }
  *data = i->second;
  return true;
}

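// Collect the names of all methods whose top-K usage percentage is below the given threshold,
// i.e. the most heavily sampled methods.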
bool ProfileFile::GetTopKSamples(std::set<std::string>& topKSamples, double topKPercentage) {
  ProfileMap::iterator end = profile_map_.end();
  for (ProfileMap::iterator it = profile_map_.begin(); it != end; it++) {
    if (it->second.GetTopKUsedPercentage() < topKPercentage) {
      topKSamples.insert(it->first);
    }
  }
  return true;
}

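// Find the child of this trie node that matches the given (method, dex_pc) pair, or return
// nullptr if there is none.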
StackTrieNode* StackTrieNode::FindChild(MethodReference method, uint32_t dex_pc) {
  if (children_.size() == 0) {
    return nullptr;
  }
  // Create a dummy node for searching.
  StackTrieNode* node = new StackTrieNode(method, dex_pc, 0, nullptr);
  std::set<StackTrieNode*, StackTrieNodeComparator>::iterator i = children_.find(node);
  delete node;
  return (i == children_.end()) ? nullptr : *i;
}

void StackTrieNode::DeleteChildren() {
  for (auto &child : children_) {
    if (child != nullptr) {
      child->DeleteChildren();
      delete child;
    }
  }
}

}  // namespace art