/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "profiler.h"

#include <sys/file.h>
#include <sys/stat.h>
#include <sys/uio.h>

#include <fstream>

#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "instrumentation.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "thread.h"
#include "thread_list.h"

#include "entrypoints/quick/quick_entrypoints.h"

namespace art {

BackgroundMethodSamplingProfiler* BackgroundMethodSamplingProfiler::profiler_ = nullptr;
pthread_t BackgroundMethodSamplingProfiler::profiler_pthread_ = 0U;
volatile bool BackgroundMethodSamplingProfiler::shutting_down_ = false;

// TODO: this profiler runs regardless of the state of the machine. Maybe we should use the
// wakelock or something to modify the run characteristics. This can be done when we
// have some performance data after it's been used for a while.

// Walk the Java stack, recording each method and its dex pc, up to a depth of max_depth_.
class BoundedStackVisitor : public StackVisitor {
 public:
  BoundedStackVisitor(std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack,
      Thread* thread, uint32_t max_depth)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        stack_(stack),
        max_depth_(max_depth),
        depth_(0) {}

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }
    uint32_t dex_pc_ = GetDexPc();
    stack_->push_back(std::make_pair(m, dex_pc_));
    ++depth_;
    if (depth_ < max_depth_) {
      return true;
    } else {
      return false;
    }
  }

 private:
  std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack_;
  const uint32_t max_depth_;
  uint32_t depth_;
};

// This is called from either a thread list traversal or from a checkpoint. Regardless
// of which caller, the mutator lock must be held.
static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  BackgroundMethodSamplingProfiler* profiler =
      reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
  const ProfilerOptions profile_options = profiler->GetProfilerOptions();
  switch (profile_options.GetProfileType()) {
    case kProfilerMethod: {
      mirror::ArtMethod* method = thread->GetCurrentMethod(nullptr);
      if ((false) && method == nullptr) {
        LOG(INFO) << "No current method available";
        std::ostringstream os;
        thread->Dump(os);
        std::string data(os.str());
        LOG(INFO) << data;
      }
      profiler->RecordMethod(method);
      break;
    }
    case kProfilerBoundedStack: {
      std::vector<InstructionLocation> stack;
      uint32_t max_depth = profile_options.GetMaxStackDepth();
      BoundedStackVisitor bounded_stack_visitor(&stack, thread, max_depth);
      bounded_stack_visitor.WalkStack();
      profiler->RecordStack(stack);
      break;
    }
    default:
      LOG(INFO) << "This profile type is not implemented.";
  }
}

// A closure that is called by the thread checkpoint code.
class SampleCheckpoint FINAL : public Closure {
 public:
  explicit SampleCheckpoint(BackgroundMethodSamplingProfiler* const profiler) :
      profiler_(profiler) {}

  void Run(Thread* thread) OVERRIDE {
    Thread* self = Thread::Current();
    if (thread == nullptr) {
      LOG(ERROR) << "Checkpoint with nullptr thread";
      return;
    }

    // Grab the mutator lock (shared access).
    ScopedObjectAccess soa(self);

    // Grab a sample.
    GetSample(thread, this->profiler_);

    // And finally tell the barrier that we're done.
    this->profiler_->GetBarrier().Pass(self);
  }

 private:
  BackgroundMethodSamplingProfiler* const profiler_;
};

bool BackgroundMethodSamplingProfiler::ShuttingDown(Thread* self) {
  MutexLock mu(self, *Locks::profiler_lock_);
  return shutting_down_;
}

void* BackgroundMethodSamplingProfiler::RunProfilerThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  BackgroundMethodSamplingProfiler* profiler =
      reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);

  // Add a random delay for the first time run so that we don't hammer the CPU
  // with all profiles running at the same time.
  const int kRandomDelayMaxSecs = 30;
  const double kMaxBackoffSecs = 24*60*60;  // Max backoff time.

  srand(MicroTime() * getpid());
  int startup_delay = rand() % kRandomDelayMaxSecs;  // random delay for startup.


  CHECK(runtime->AttachCurrentThread("Profiler", true, runtime->GetSystemThreadGroup(),
                                     !runtime->IsAotCompiler()));

  Thread* self = Thread::Current();

  double backoff = 1.0;
  while (true) {
    if (ShuttingDown(self)) {
      break;
    }

    {
      // wait until we need to run another profile
      uint64_t delay_secs = profiler->options_.GetPeriodS() * backoff;

      // Add a startup delay to prevent all the profiles running at once.
      delay_secs += startup_delay;

      // Immediate startup for benchmarking?
      if (profiler->options_.GetStartImmediately() && startup_delay > 0) {
        delay_secs = 0;
      }

      startup_delay = 0;

      VLOG(profiler) << "Delaying profile start for " << delay_secs << " secs";
      MutexLock mu(self, profiler->wait_lock_);
      profiler->period_condition_.TimedWait(self, delay_secs * 1000, 0);
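      // Note: delay_secs * 1000 above converts the delay from seconds to the millisecond
      // units that TimedWait takes (the trailing 0 is the nanosecond component).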
      // We were either signaled by Stop or timed out; in either case, ignore the timed-out result.

      // Expand the backoff by its coefficient, but don't go beyond the max.
      backoff = std::min(backoff * profiler->options_.GetBackoffCoefficient(), kMaxBackoffSecs);
    }

    if (ShuttingDown(self)) {
      break;
    }


    uint64_t start_us = MicroTime();
    uint64_t end_us = start_us + profiler->options_.GetDurationS() * UINT64_C(1000000);
    uint64_t now_us = start_us;

    VLOG(profiler) << "Starting profiling run now for "
                   << PrettyDuration((end_us - start_us) * 1000);

    SampleCheckpoint check_point(profiler);

    size_t valid_samples = 0;
    while (now_us < end_us) {
      if (ShuttingDown(self)) {
        break;
      }

      usleep(profiler->options_.GetIntervalUs());  // Non-interruptible sleep.

      ThreadList* thread_list = runtime->GetThreadList();

      profiler->profiler_barrier_->Init(self, 0);
      size_t barrier_count = thread_list->RunCheckpointOnRunnableThreads(&check_point);

      // All threads are suspended, nothing to do.
      if (barrier_count == 0) {
        now_us = MicroTime();
        continue;
      }

      valid_samples += barrier_count;

      ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);

      // Wait for the barrier to be crossed by all runnable threads. This wait
      // is done with a timeout so that we can detect problems with the checkpoint
      // running code. We should never see this.
      const uint32_t kWaitTimeoutMs = 10000;

      // Wait for all threads to pass the barrier.
      bool timed_out = profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);

      // We should never get a timeout. If we do, it suggests a problem with the checkpoint
      // code. Crash the process in this case.
      CHECK(!timed_out);

      // Update the current time.
      now_us = MicroTime();
    }

    if (valid_samples > 0) {
      // After the profile has been taken, write it out.
      ScopedObjectAccess soa(self);  // Acquire the mutator lock.
      uint32_t size = profiler->WriteProfile();
      VLOG(profiler) << "Profile size: " << size;
    }
  }

  LOG(INFO) << "Profiler shutdown";
  runtime->DetachCurrentThread();
  return nullptr;
}

// Write out the profile file if we are generating a profile.
uint32_t BackgroundMethodSamplingProfiler::WriteProfile() {
  std::string full_name = output_filename_;
  VLOG(profiler) << "Saving profile to " << full_name;

  int fd = open(full_name.c_str(), O_RDWR);
  if (fd < 0) {
    // Open failed.
    LOG(ERROR) << "Failed to open profile file " << full_name;
    return 0;
  }

  // Lock the file for exclusive access. This will block if another process is using
  // the file.
  int err = flock(fd, LOCK_EX);
  if (err < 0) {
    LOG(ERROR) << "Failed to lock profile file " << full_name;
    return 0;
  }

  // Read the previous profile.
  profile_table_.ReadPrevious(fd, options_.GetProfileType());

  // Move back to the start of the file.
  lseek(fd, 0, SEEK_SET);

  // Format the profile output and write to the file.
  std::ostringstream os;
  uint32_t num_methods = DumpProfile(os);
  std::string data(os.str());
  const char *p = data.c_str();
  size_t length = data.length();
  size_t full_length = length;
  do {
    int n = ::write(fd, p, length);
    p += n;
    length -= n;
  } while (length > 0);

  // Truncate the file to the new length.
  ftruncate(fd, full_length);

  // Now unlock the file, allowing another process in.
  err = flock(fd, LOCK_UN);
  if (err < 0) {
    LOG(ERROR) << "Failed to unlock profile file " << full_name;
  }

  // Done, close the file.
  ::close(fd);

  // Clean the profile for the next time.
  CleanProfile();

  return num_methods;
}

bool BackgroundMethodSamplingProfiler::Start(
    const std::string& output_filename, const ProfilerOptions& options) {
  if (!options.IsEnabled()) {
    return false;
  }

  CHECK(!output_filename.empty());

  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::profiler_lock_);
    // Don't start two profiler threads.
    if (profiler_ != nullptr) {
      return true;
    }
  }

  LOG(INFO) << "Starting profiler using output file: " << output_filename
            << " and options: " << options;
  {
    MutexLock mu(self, *Locks::profiler_lock_);
    profiler_ = new BackgroundMethodSamplingProfiler(output_filename, options);

    CHECK_PTHREAD_CALL(pthread_create, (&profiler_pthread_, nullptr, &RunProfilerThread,
        reinterpret_cast<void*>(profiler_)),
                       "Profiler thread");
  }
  return true;
}



void BackgroundMethodSamplingProfiler::Stop() {
  BackgroundMethodSamplingProfiler* profiler = nullptr;
  pthread_t profiler_pthread = 0U;
  {
    MutexLock trace_mu(Thread::Current(), *Locks::profiler_lock_);
    CHECK(!shutting_down_);
    profiler = profiler_;
    shutting_down_ = true;
    profiler_pthread = profiler_pthread_;
  }

  // Now wake up the sampler thread if it is sleeping.
  {
    MutexLock profile_mu(Thread::Current(), profiler->wait_lock_);
    profiler->period_condition_.Signal(Thread::Current());
  }
  // Wait for the sample thread to stop.
  CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profiler thread shutdown");

  {
    MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
    profiler_ = nullptr;
  }
  delete profiler;
}


void BackgroundMethodSamplingProfiler::Shutdown() {
  Stop();
}

BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler(
    const std::string& output_filename, const ProfilerOptions& options)
    : output_filename_(output_filename),
      options_(options),
      wait_lock_("Profile wait lock"),
      period_condition_("Profile condition", wait_lock_),
      profile_table_(wait_lock_),
      profiler_barrier_(new Barrier(0)) {
  // Populate the filtered_methods set.
  // This is empty right now, but to add a method, do this:
  //
  // filtered_methods_.insert("void java.lang.Object.wait(long, int)");
}

// Filter out methods the profiler doesn't want to record.
// We require mutator lock since some statistics will be updated here.
bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method) {
  if (method == nullptr) {
    profile_table_.NullMethod();
    // Don't record a null method.
    return false;
  }

  mirror::Class* cls = method->GetDeclaringClass();
  if (cls != nullptr) {
    if (cls->GetClassLoader() == nullptr) {
      // Don't include methods from the boot class path.
      profile_table_.BootMethod();
      return false;
    }
  }

  bool is_filtered = false;

  if (strcmp(method->GetName(), "<clinit>") == 0) {
    // Always filter out class initializers.
    is_filtered = true;
  }

  // Filter out methods by name if there are any.
  if (!is_filtered && filtered_methods_.size() > 0) {
    std::string method_full_name = PrettyMethod(method);

    // Don't include specific filtered methods.
    is_filtered = filtered_methods_.count(method_full_name) != 0;
  }
  return !is_filtered;
}

// A method has been hit, record its invocation in the method map.
// The mutator_lock must be held (shared) when this is called.
void BackgroundMethodSamplingProfiler::RecordMethod(mirror::ArtMethod* method) {
  // Add to the profile table unless it is filtered out.
  if (ProcessMethod(method)) {
    profile_table_.Put(method);
  }
}

// Record the current bounded stack into sampling results.
void BackgroundMethodSamplingProfiler::RecordStack(const std::vector<InstructionLocation>& stack) {
  if (stack.size() == 0) {
    return;
  }
  // Get the method on top of the stack. We use this method to perform filtering.
  mirror::ArtMethod* method = stack.front().first;
  if (ProcessMethod(method)) {
    profile_table_.PutStack(stack);
  }
}

// Clean out any recordings for the method traces.
void BackgroundMethodSamplingProfiler::CleanProfile() {
  profile_table_.Clear();
}

uint32_t BackgroundMethodSamplingProfiler::DumpProfile(std::ostream& os) {
  return profile_table_.Write(os, options_.GetProfileType());
}

// Profile Table.
// This holds a mapping of mirror::ArtMethod* to a count of how many times a sample
// hit it at the top of the stack.
ProfileSampleResults::ProfileSampleResults(Mutex& lock) : lock_(lock), num_samples_(0),
    num_null_methods_(0),
    num_boot_methods_(0) {
  for (int i = 0; i < kHashSize; i++) {
    table[i] = nullptr;
  }
  method_context_table = nullptr;
  stack_trie_root_ = nullptr;
}

ProfileSampleResults::~ProfileSampleResults() {
  Clear();
}

// Add a method to the profile table. If it's the first time the method
// has been seen, add it with count=1, otherwise increment the count.
void ProfileSampleResults::Put(mirror::ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  uint32_t index = Hash(method);
  if (table[index] == nullptr) {
    table[index] = new Map();
  }
  Map::iterator i = table[index]->find(method);
  if (i == table[index]->end()) {
    (*table[index])[method] = 1;
  } else {
    i->second++;
  }
  num_samples_++;
}

// Add a bounded stack to the profile table. Only the count of the method in the
// top stack frame will be increased.
void ProfileSampleResults::PutStack(const std::vector<InstructionLocation>& stack) {
  MutexLock mu(Thread::Current(), lock_);
  ScopedObjectAccess soa(Thread::Current());
  if (stack_trie_root_ == nullptr) {
    // The root of the stack trie is a dummy node so that we don't have to maintain
    // a collection of tries.
    stack_trie_root_ = new StackTrieNode();
  }

  StackTrieNode* current = stack_trie_root_;
  if (stack.size() == 0) {
    current->IncreaseCount();
    return;
  }

  for (std::vector<InstructionLocation>::const_reverse_iterator iter = stack.rbegin();
       iter != stack.rend(); ++iter) {
    InstructionLocation inst_loc = *iter;
    mirror::ArtMethod* method = inst_loc.first;
    if (method == nullptr) {
      // skip null method
      continue;
    }
    uint32_t dex_pc = inst_loc.second;
    uint32_t method_idx = method->GetDexMethodIndex();
    const DexFile* dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    MethodReference method_ref(dex_file, method_idx);
    StackTrieNode* child = current->FindChild(method_ref, dex_pc);
    if (child != nullptr) {
      current = child;
    } else {
      uint32_t method_size = 0;
      const DexFile::CodeItem* codeitem = method->GetCodeItem();
      if (codeitem != nullptr) {
        method_size = codeitem->insns_size_in_code_units_;
      }
      StackTrieNode* new_node = new StackTrieNode(method_ref, dex_pc, method_size, current);
      current->AppendChild(new_node);
      current = new_node;
    }
  }

  if (current != stack_trie_root_ && current->GetCount() == 0) {
    // Insert into the method_context table.
    if (method_context_table == nullptr) {
      method_context_table = new MethodContextMap();
    }
    MethodReference method = current->GetMethod();
    MethodContextMap::iterator i = method_context_table->find(method);
    if (i == method_context_table->end()) {
      TrieNodeSet* node_set = new TrieNodeSet();
      node_set->insert(current);
      (*method_context_table)[method] = node_set;
    } else {
      TrieNodeSet* node_set = i->second;
      node_set->insert(current);
    }
  }
  current->IncreaseCount();
  num_samples_++;
}

// Write the profile table to the output stream. Also merge with the previous profile.
uint32_t ProfileSampleResults::Write(std::ostream& os, ProfileDataType type) {
  ScopedObjectAccess soa(Thread::Current());
  num_samples_ += previous_num_samples_;
  num_null_methods_ += previous_num_null_methods_;
  num_boot_methods_ += previous_num_boot_methods_;

  VLOG(profiler) << "Profile: "
                 << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_;
  os << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_ << "\n";
  uint32_t num_methods = 0;
  if (type == kProfilerMethod) {
    for (int i = 0 ; i < kHashSize; i++) {
      Map *map = table[i];
      if (map != nullptr) {
        for (const auto &meth_iter : *map) {
          mirror::ArtMethod *method = meth_iter.first;
          std::string method_name = PrettyMethod(method);

          const DexFile::CodeItem* codeitem = method->GetCodeItem();
          uint32_t method_size = 0;
          if (codeitem != nullptr) {
            method_size = codeitem->insns_size_in_code_units_;
          }
          uint32_t count = meth_iter.second;

          // Merge this profile entry with one from a previous run (if present). Also
          // remove the previous entry.
          PreviousProfile::iterator pi = previous_.find(method_name);
          if (pi != previous_.end()) {
            count += pi->second.count_;
            previous_.erase(pi);
          }
          os << StringPrintf("%s/%u/%u\n", method_name.c_str(), count, method_size);
          ++num_methods;
        }
      }
    }
  } else if (type == kProfilerBoundedStack) {
    if (method_context_table != nullptr) {
      for (const auto &method_iter : *method_context_table) {
        MethodReference method = method_iter.first;
        TrieNodeSet* node_set = method_iter.second;
        std::string method_name = PrettyMethod(method.dex_method_index, *(method.dex_file));
        uint32_t method_size = 0;
        uint32_t total_count = 0;
        PreviousContextMap new_context_map;
        for (const auto &trie_node_i : *node_set) {
          StackTrieNode* node = trie_node_i;
          method_size = node->GetMethodSize();
          uint32_t count = node->GetCount();
          uint32_t dexpc = node->GetDexPC();
          total_count += count;

          StackTrieNode* current = node->GetParent();
          // We go backward on the trie to retrieve context and dex_pc until the dummy root.
          // The format of the context is "method_1@pc_1@method_2@pc_2@..."
          std::vector<std::string> context_vector;
          while (current != nullptr && current->GetParent() != nullptr) {
            context_vector.push_back(StringPrintf("%s@%u",
                PrettyMethod(current->GetMethod().dex_method_index, *(current->GetMethod().dex_file)).c_str(),
                current->GetDexPC()));
            current = current->GetParent();
          }
          std::string context_sig = Join(context_vector, '@');
          new_context_map[std::make_pair(dexpc, context_sig)] = count;
        }

        PreviousProfile::iterator pi = previous_.find(method_name);
        if (pi != previous_.end()) {
          total_count += pi->second.count_;
          PreviousContextMap* previous_context_map = pi->second.context_map_;
          if (previous_context_map != nullptr) {
            for (const auto &context_i : *previous_context_map) {
              uint32_t count = context_i.second;
              PreviousContextMap::iterator ci = new_context_map.find(context_i.first);
              if (ci == new_context_map.end()) {
                new_context_map[context_i.first] = count;
              } else {
                ci->second += count;
              }
            }
          }
          delete previous_context_map;
          previous_.erase(pi);
        }
        // We write out profile data with dex pc and context information in the following format:
        // "method/total_count/size/[pc_1:count_1:context_1#pc_2:count_2:context_2#...]".
        std::vector<std::string> context_count_vector;
        for (const auto &context_i : new_context_map) {
          context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
              context_i.second, context_i.first.second.c_str()));
        }
        os << StringPrintf("%s/%u/%u/[%s]\n", method_name.c_str(), total_count,
            method_size, Join(context_count_vector, '#').c_str());
        ++num_methods;
      }
    }
  }

  // Now we write out the remaining previous methods.
  for (const auto &pi : previous_) {
    if (type == kProfilerMethod) {
      os << StringPrintf("%s/%u/%u\n", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
    } else if (type == kProfilerBoundedStack) {
      os << StringPrintf("%s/%u/%u/[", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
      PreviousContextMap* previous_context_map = pi.second.context_map_;
      if (previous_context_map != nullptr) {
        std::vector<std::string> context_count_vector;
        for (const auto &context_i : *previous_context_map) {
          context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
              context_i.second, context_i.first.second.c_str()));
        }
        os << Join(context_count_vector, '#');
      }
      os << "]\n";
    }
    ++num_methods;
  }
  return num_methods;
}

void ProfileSampleResults::Clear() {
  num_samples_ = 0;
  num_null_methods_ = 0;
  num_boot_methods_ = 0;
  for (int i = 0; i < kHashSize; i++) {
    delete table[i];
    table[i] = nullptr;
  }
  if (stack_trie_root_ != nullptr) {
    stack_trie_root_->DeleteChildren();
    delete stack_trie_root_;
    stack_trie_root_ = nullptr;
    if (method_context_table != nullptr) {
      delete method_context_table;
      method_context_table = nullptr;
    }
  }
  for (auto &pi : previous_) {
    if (pi.second.context_map_ != nullptr) {
      delete pi.second.context_map_;
      pi.second.context_map_ = nullptr;
    }
  }
  previous_.clear();
}

uint32_t ProfileSampleResults::Hash(mirror::ArtMethod* method) {
  return (PointerToLowMemUInt32(method) >> 3) % kHashSize;
}

// Read a single line into the given string. Returns true if everything OK, false
// on EOF or error.
static bool ReadProfileLine(int fd, std::string& line) {
  char buf[4];
  line.clear();
  while (true) {
    int n = read(fd, buf, 1);  // TODO: could speed this up but is it worth it?
    if (n != 1) {
      return false;
    }
    if (buf[0] == '\n') {
      break;
    }
    line += buf[0];
  }
  return true;
}

void ProfileSampleResults::ReadPrevious(int fd, ProfileDataType type) {
  // Reset counters.
  previous_num_samples_ = previous_num_null_methods_ = previous_num_boot_methods_ = 0;

  std::string line;

  // The first line contains summary information.
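  // Illustration (made-up numbers): the summary line written by Write() looks like
  // "1500/23/410", i.e. samples/null-method samples/boot-method samples.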
  if (!ReadProfileLine(fd, line)) {
    return;
  }
  std::vector<std::string> summary_info;
  Split(line, '/', &summary_info);
  if (summary_info.size() != 3) {
    // Bad summary info. It should be count/nullcount/bootcount
    return;
  }
  previous_num_samples_ = strtoul(summary_info[0].c_str(), nullptr, 10);
  previous_num_null_methods_ = strtoul(summary_info[1].c_str(), nullptr, 10);
  previous_num_boot_methods_ = strtoul(summary_info[2].c_str(), nullptr, 10);

  // Now read each line until the end of file. Each line consists of 3 or 4 fields separated by /
  while (true) {
    if (!ReadProfileLine(fd, line)) {
      break;
    }
    std::vector<std::string> info;
    Split(line, '/', &info);
    if (info.size() != 3 && info.size() != 4) {
      // Malformed.
      break;
    }
    std::string methodname = info[0];
    uint32_t total_count = strtoul(info[1].c_str(), nullptr, 10);
    uint32_t size = strtoul(info[2].c_str(), nullptr, 10);
    PreviousContextMap* context_map = nullptr;
    if (type == kProfilerBoundedStack && info.size() == 4) {
      context_map = new PreviousContextMap();
      std::string context_counts_str = info[3].substr(1, info[3].size() - 2);
      std::vector<std::string> context_count_pairs;
      Split(context_counts_str, '#', &context_count_pairs);
      for (uint32_t i = 0; i < context_count_pairs.size(); ++i) {
        std::vector<std::string> context_count;
        Split(context_count_pairs[i], ':', &context_count);
        if (context_count.size() == 2) {
          // Handles the situation where the profile file doesn't contain context information.
          uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
          uint32_t count = strtoul(context_count[1].c_str(), nullptr, 10);
          (*context_map)[std::make_pair(dexpc, "")] = count;
        } else {
781 // Handles the situtation when the profile file contains context information.
          uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
          uint32_t count = strtoul(context_count[1].c_str(), nullptr, 10);
          std::string context = context_count[2];
          (*context_map)[std::make_pair(dexpc, context)] = count;
        }
      }
    }
    previous_[methodname] = PreviousValue(total_count, size, context_map);
  }
}

bool ProfileFile::LoadFile(const std::string& fileName) {
  LOG(VERBOSE) << "reading profile file " << fileName;
  struct stat st;
  int err = stat(fileName.c_str(), &st);
  if (err == -1) {
    LOG(VERBOSE) << "not found";
    return false;
  }
  if (st.st_size == 0) {
    return false;  // Empty profiles are invalid.
  }
  std::ifstream in(fileName.c_str());
  if (!in) {
    LOG(VERBOSE) << "profile file " << fileName << " exists but can't be opened";
    LOG(VERBOSE) << "file owner: " << st.st_uid << ":" << st.st_gid;
    LOG(VERBOSE) << "me: " << getuid() << ":" << getgid();
    LOG(VERBOSE) << "file permissions: " << std::oct << st.st_mode;
    LOG(VERBOSE) << "errno: " << errno;
    return false;
  }
  // The first line contains summary information.
  std::string line;
  std::getline(in, line);
  if (in.eof()) {
    return false;
  }
  std::vector<std::string> summary_info;
  Split(line, '/', &summary_info);
  if (summary_info.size() != 3) {
    // Bad summary info. It should be total/null/boot.
    return false;
  }
  // This is the number of hits in all profiled methods (without null or boot methods).
  uint32_t total_count = strtoul(summary_info[0].c_str(), nullptr, 10);

  // Now read each line until the end of file. Each line consists of 3 fields separated by '/'.
  // Store the info in descending order given by the most used methods.
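  // Illustration (hypothetical method name and numbers): a method line looks like
  // "void com.example.Foo.bar()/25/96", i.e. method/count/size.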
  typedef std::set<std::pair<int, std::vector<std::string>>> ProfileSet;
  ProfileSet countSet;
  while (!in.eof()) {
    std::getline(in, line);
    if (in.eof()) {
      break;
    }
    std::vector<std::string> info;
    Split(line, '/', &info);
    if (info.size() != 3 && info.size() != 4) {
      // Malformed.
      return false;
    }
    int count = atoi(info[1].c_str());
    countSet.insert(std::make_pair(-count, info));
  }

  uint32_t curTotalCount = 0;
  ProfileSet::iterator end = countSet.end();
  const ProfileData* prevData = nullptr;
  for (ProfileSet::iterator it = countSet.begin(); it != end ; it++) {
    const std::string& methodname = it->second[0];
    uint32_t count = -it->first;
    uint32_t size = strtoul(it->second[2].c_str(), nullptr, 10);
    double usedPercent = (count * 100.0) / total_count;

    curTotalCount += count;
    // Methods with the same count should be part of the same top K percentage bucket.
    double topKPercentage = (prevData != nullptr) && (prevData->GetCount() == count)
      ? prevData->GetTopKUsedPercentage()
      : 100 * static_cast<double>(curTotalCount) / static_cast<double>(total_count);

    // Add it to the profile map.
    ProfileData curData = ProfileData(methodname, count, size, usedPercent, topKPercentage);
    profile_map_[methodname] = curData;
    prevData = &curData;
  }
  return true;
}

bool ProfileFile::GetProfileData(ProfileFile::ProfileData* data, const std::string& method_name) {
  ProfileMap::iterator i = profile_map_.find(method_name);
  if (i == profile_map_.end()) {
    return false;
  }
  *data = i->second;
  return true;
}

bool ProfileFile::GetTopKSamples(std::set<std::string>& topKSamples, double topKPercentage) {
  ProfileMap::iterator end = profile_map_.end();
  for (ProfileMap::iterator it = profile_map_.begin(); it != end; it++) {
    if (it->second.GetTopKUsedPercentage() < topKPercentage) {
      topKSamples.insert(it->first);
    }
  }
  return true;
}

StackTrieNode* StackTrieNode::FindChild(MethodReference method, uint32_t dex_pc) {
  if (children_.size() == 0) {
    return nullptr;
  }
  // Create a dummy node for searching.
  StackTrieNode* node = new StackTrieNode(method, dex_pc, 0, nullptr);
  std::set<StackTrieNode*, StackTrieNodeComparator>::iterator i = children_.find(node);
  delete node;
  return (i == children_.end()) ? nullptr : *i;
}

void StackTrieNode::DeleteChildren() {
  for (auto &child : children_) {
    if (child != nullptr) {
      child->DeleteChildren();
      delete child;
    }
  }
}

}  // namespace art