/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "profiler.h"

#include <sys/uio.h>

#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "instrumentation.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "object_utils.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "thread.h"
#include "thread_list.h"

#ifdef HAVE_ANDROID_OS
#include "cutils/properties.h"
#endif

#if !defined(ART_USE_PORTABLE_COMPILER)
#include "entrypoints/quick/quick_entrypoints.h"
#endif

namespace art {

BackgroundMethodSamplingProfiler* BackgroundMethodSamplingProfiler::profiler_ = nullptr;
pthread_t BackgroundMethodSamplingProfiler::profiler_pthread_ = 0U;
volatile bool BackgroundMethodSamplingProfiler::shutting_down_ = false;

// TODO: this profiler runs regardless of the state of the machine.  We may want to use
// a wakelock or a similar mechanism to modify the run characteristics once we have
// gathered performance data from real use.

// This is called from either a thread list traversal or from a checkpoint.  Regardless
// of the caller, the mutator lock must be held.
static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  BackgroundMethodSamplingProfiler* profiler =
      reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
  mirror::ArtMethod* method = thread->GetCurrentMethod(nullptr);
  // Debug-only dump of the sampled thread, disabled by the "false &&" guard.
  if (false && method == nullptr) {
    LOG(INFO) << "No current method available";
    std::ostringstream os;
    thread->Dump(os);
    std::string data(os.str());
    LOG(INFO) << data;
  }
  profiler->RecordMethod(method);
}

// A closure that is called by the thread checkpoint code.
class SampleCheckpoint : public Closure {
 public:
  explicit SampleCheckpoint(BackgroundMethodSamplingProfiler* const profiler) :
      profiler_(profiler) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    Thread* self = Thread::Current();
    if (thread == nullptr) {
      LOG(ERROR) << "Checkpoint with nullptr thread";
      return;
    }

    // Grab the mutator lock (shared access).
    ScopedObjectAccess soa(self);

    // Grab a sample.
    GetSample(thread, this->profiler_);

    // And finally tell the barrier that we're done.
    this->profiler_->GetBarrier().Pass(self);
  }

 private:
  BackgroundMethodSamplingProfiler* const profiler_;
};

bool BackgroundMethodSamplingProfiler::ShuttingDown(Thread* self) {
  MutexLock mu(self, *Locks::profiler_lock_);
  return shutting_down_;
}

void* BackgroundMethodSamplingProfiler::RunProfilerThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  BackgroundMethodSamplingProfiler* profiler =
      reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);

  // Add a random delay for the first run so that we don't hammer the CPU with all
  // profiles running at the same time.
  const int kRandomDelayMaxSecs = 30;
  const double kMaxBackoffSecs = 24 * 60 * 60;  // Max backoff time (one day).

  srand(MicroTime() * getpid());
  int startup_delay = rand() % kRandomDelayMaxSecs;  // Random delay for startup.

  CHECK(runtime->AttachCurrentThread("Profiler", true, runtime->GetSystemThreadGroup(),
                                     !runtime->IsCompiler()));

  Thread* self = Thread::Current();

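  // Main profiler loop: wait out the (backed-off) period, then sample every thread
  // each interval_us_ microseconds for duration_s_ seconds, and finally write the
  // collected profile to disk.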
  while (true) {
    if (ShuttingDown(self)) {
      break;
    }

    {
      // Wait until we need to run another profile.
      uint64_t delay_secs = profiler->period_s_ * profiler->backoff_factor_;

      // Add a startup delay to prevent all the profiles running at once.
      delay_secs += startup_delay;

      // If requested, start the first run immediately (useful for benchmarking).
      if (profiler->start_immediately_ && startup_delay > 0) {
        delay_secs = 0;
      }

      startup_delay = 0;

      LOG(DEBUG) << "Delaying profile start for " << delay_secs << " secs";
      MutexLock mu(self, profiler->wait_lock_);
      // TimedWait takes milliseconds.
      profiler->period_condition_.TimedWait(self, delay_secs * 1000, 0);

      // Expand the backoff by its coefficient, but don't go beyond the max.
      double new_backoff = profiler->backoff_factor_ * profiler->backoff_coefficient_;
      if (new_backoff < kMaxBackoffSecs) {
        profiler->backoff_factor_ = new_backoff;
      }
    }

    if (ShuttingDown(self)) {
      break;
    }

    uint64_t start_us = MicroTime();
    uint64_t end_us = start_us + profiler->duration_s_ * 1000000LL;
    uint64_t now_us = start_us;

    LOG(DEBUG) << "Starting profiling run now for " << PrettyDuration((end_us - start_us) * 1000);

    SampleCheckpoint check_point(profiler);

    while (now_us < end_us) {
      if (ShuttingDown(self)) {
        break;
      }

      usleep(profiler->interval_us_);  // Non-interruptible sleep.

      ThreadList* thread_list = runtime->GetThreadList();

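      // Reset the barrier, then ask every thread to run the SampleCheckpoint closure.
      // RunCheckpoint() returns the number of threads the checkpoint was requested on;
      // each of them calls Pass() on the barrier, and the Increment() call below blocks
      // (with a timeout) until that count has been reached.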
      profiler->profiler_barrier_->Init(self, 0);
      size_t barrier_count = thread_list->RunCheckpoint(&check_point);

      ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);

      // Wait for the barrier to be crossed by all runnable threads.  This wait is done
      // with a timeout so that we can detect problems in the checkpoint code; the
      // timeout should never be hit.
      const uint32_t kWaitTimeoutMs = 10000;
      const uint32_t kWaitTimeoutUs = kWaitTimeoutMs * 1000;

      uint64_t waitstart_us = MicroTime();
      // Wait for all threads to pass the barrier.
      profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);
      uint64_t waitend_us = MicroTime();
      uint64_t waitdiff_us = waitend_us - waitstart_us;

      // We should never get a timeout.  If we do, it suggests a problem with the
      // checkpoint code, so crash the process.
      CHECK_LT(waitdiff_us, kWaitTimeoutUs);

      self->SetState(old_state);

      // Update the current time.
      now_us = MicroTime();
    }

    if (!ShuttingDown(self)) {
      // After the profile has been taken, write it out.
      ScopedObjectAccess soa(self);  // Acquire the mutator lock.
      uint32_t size = profiler->WriteProfile();
      LOG(DEBUG) << "Profile size: " << size;
    }
  }

  LOG(INFO) << "Profiler shutdown";
  runtime->DetachCurrentThread();
  return nullptr;
}

// Write out the profile file if we are generating a profile.
uint32_t BackgroundMethodSamplingProfiler::WriteProfile() {
  UniquePtr<File> profile_file;
  Runtime* runtime = Runtime::Current();
  std::string classpath = runtime->GetClassPathString();
  size_t colon = classpath.find(':');
  if (colon != std::string::npos) {
    // The classpath can contain more than one entry; only the first one is used to
    // name the profile file.
    classpath = classpath.substr(0, colon);
  }

  std::replace(classpath.begin(), classpath.end(), '/', '@');
  std::string full_name = profile_file_name_;
  if (classpath != "") {
    full_name = StringPrintf("%s-%s", profile_file_name_.c_str(), classpath.c_str());
  }
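  // For example, a first classpath entry of "/data/app/foo.apk" (a hypothetical path)
  // yields a profile file named "<profile_file_name_>-@data@app@foo.apk".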
  LOG(DEBUG) << "Saving profile to " << full_name;

  profile_file.reset(OS::CreateEmptyFile(full_name.c_str()));
  if (profile_file.get() == nullptr) {
    // Failed to open the profile file, ignore.
    LOG(INFO) << "Failed to open profile file " << full_name;
    return 0;
  }
  std::ostringstream os;
  uint32_t num_methods = DumpProfile(os);
  std::string data(os.str());
  profile_file->WriteFully(data.c_str(), data.length());
  profile_file->Close();
  return num_methods;
}

// Start a profile thread with the user-supplied arguments.
void BackgroundMethodSamplingProfiler::Start(int period, int duration,
                                             std::string profile_file_name, int interval_us,
                                             double backoff_coefficient, bool startImmediately) {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::profiler_lock_);
    // Don't start two profiler threads.
    if (profiler_ != nullptr) {
      return;
    }
  }

  // Only on target...
#ifdef HAVE_ANDROID_OS
  // Switch off the profiler if the dalvik.vm.profiler property has the value 0.
  char buf[PROP_VALUE_MAX];
  property_get("dalvik.vm.profiler", buf, "0");
  if (strcmp(buf, "0") == 0) {
    LOG(INFO) << "Profiler disabled.  To enable: setprop dalvik.vm.profiler 1";
    return;
  }
#endif

  LOG(INFO) << "Starting profile with period " << period << "s, duration " << duration <<
      "s, interval " << interval_us << "us.  Profile file " << profile_file_name;

  {
    MutexLock mu(self, *Locks::profiler_lock_);
    profiler_ = new BackgroundMethodSamplingProfiler(period, duration, profile_file_name,
                                                     backoff_coefficient, interval_us,
                                                     startImmediately);

    CHECK_PTHREAD_CALL(pthread_create, (&profiler_pthread_, nullptr, &RunProfilerThread,
                                        reinterpret_cast<void*>(profiler_)),
                       "Profiler thread");
  }
}

void BackgroundMethodSamplingProfiler::Stop() {
  BackgroundMethodSamplingProfiler* profiler = nullptr;
  pthread_t profiler_pthread = 0U;
  {
    MutexLock trace_mu(Thread::Current(), *Locks::profiler_lock_);
    profiler = profiler_;
    shutting_down_ = true;
    profiler_pthread = profiler_pthread_;
  }

  // Now wake up the sampler thread if it is sleeping.
  {
    MutexLock profile_mu(Thread::Current(), profiler->wait_lock_);
    profiler->period_condition_.Signal(Thread::Current());
  }
  // Wait for the sampler thread to stop.
  CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profiler thread shutdown");

  {
    MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
    profiler_ = nullptr;
  }
  delete profiler;
}


void BackgroundMethodSamplingProfiler::Shutdown() {
  Stop();
}

BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler(int period, int duration,
    std::string profile_file_name, double backoff_coefficient, int interval_us,
    bool startImmediately)
    : profile_file_name_(profile_file_name),
      period_s_(period), start_immediately_(startImmediately),
      interval_us_(interval_us), backoff_factor_(1.0),
      backoff_coefficient_(backoff_coefficient), duration_s_(duration),
      wait_lock_("Profile wait lock"),
      period_condition_("Profile condition", wait_lock_),
      profile_table_(wait_lock_),
      profiler_barrier_(new Barrier(0)) {
  // Populate the filtered_methods set.
  // This is empty right now, but to add a method, do this:
  //
  //   filtered_methods_.insert("void java.lang.Object.wait(long, int)");
}

// A method has been hit; record its invocation in the method map.
// The mutator_lock must be held (shared) when this is called.
void BackgroundMethodSamplingProfiler::RecordMethod(mirror::ArtMethod* method) {
  if (method == nullptr) {
    profile_table_.NullMethod();
    // Don't record a nullptr method.
    return;
  }

  mirror::Class* cls = method->GetDeclaringClass();
  if (cls != nullptr) {
    if (cls->GetClassLoader() == nullptr) {
      // Don't include methods from the boot class path.
      profile_table_.BootMethod();
      return;
    }
  }

  bool is_filtered = false;

  MethodHelper mh(method);
  if (strcmp(mh.GetName(), "<clinit>") == 0) {
    // Always filter out class initializers.
    is_filtered = true;
  }

  // Filter out methods by name if there are any filters.
  if (!is_filtered && filtered_methods_.size() > 0) {
    std::string method_full_name = PrettyMethod(method);

    // Don't include specific filtered methods.
    is_filtered = filtered_methods_.count(method_full_name) != 0;
  }

  // Add to the profile table unless it is filtered out.
  if (!is_filtered) {
    profile_table_.Put(method);
  }
}

// Clean out any recordings for the method traces.
void BackgroundMethodSamplingProfiler::CleanProfile() {
  profile_table_.Clear();
}

uint32_t BackgroundMethodSamplingProfiler::DumpProfile(std::ostream& os) {
  return profile_table_.Write(os);
}

// Profile Table.
// This holds a mapping of mirror::ArtMethod* to a count of how many times a sample
// hit it at the top of the stack.
ProfileSampleResults::ProfileSampleResults(Mutex& lock)
    : lock_(lock), num_samples_(0), num_null_methods_(0), num_boot_methods_(0) {
  for (int i = 0; i < kHashSize; i++) {
    table[i] = nullptr;
  }
}

ProfileSampleResults::~ProfileSampleResults() {
  for (int i = 0; i < kHashSize; i++) {
    delete table[i];
  }
}

// Add a method to the profile table.  If it is the first time the method
// has been seen, add it with count=1, otherwise increment the count.
void ProfileSampleResults::Put(mirror::ArtMethod* method) {
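  // The table has kHashSize buckets; each bucket is a lazily allocated Map from
  // ArtMethod* to the number of samples that hit that method.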
  lock_.Lock(Thread::Current());
  uint32_t index = Hash(method);
  if (table[index] == nullptr) {
    table[index] = new Map();
  }
  Map::iterator i = table[index]->find(method);
  if (i == table[index]->end()) {
    (*table[index])[method] = 1;
  } else {
    i->second++;
  }
  num_samples_++;
  lock_.Unlock(Thread::Current());
}

// Write the profile table to the output stream.
uint32_t ProfileSampleResults::Write(std::ostream& os) {
  ScopedObjectAccess soa(Thread::Current());
  LOG(DEBUG) << "Profile: " << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_;
  os << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_ << "\n";
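  // The line above is a summary: "<total samples>/<null-method samples>/<boot-method samples>".
  // Each subsequent line has the form "<pretty method name>/<sample count>/<code size>".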
  uint32_t num_methods = 0;
  for (int i = 0; i < kHashSize; i++) {
    Map* map = table[i];
    if (map != nullptr) {
      for (const auto& meth_iter : *map) {
        mirror::ArtMethod* method = meth_iter.first;
        std::string method_name = PrettyMethod(method);
        uint32_t method_size = method->GetCodeSize();
        os << StringPrintf("%s/%u/%u\n", method_name.c_str(), meth_iter.second, method_size);
        ++num_methods;
      }
    }
  }
  return num_methods;
}

void ProfileSampleResults::Clear() {
  num_samples_ = 0;
  num_null_methods_ = 0;
  num_boot_methods_ = 0;
  for (int i = 0; i < kHashSize; i++) {
    delete table[i];
    table[i] = nullptr;
  }
}

uint32_t ProfileSampleResults::Hash(mirror::ArtMethod* method) {
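  // Method pointers are aligned, so their low bits carry no information; shift them
  // out before taking the modulus so that samples spread across the buckets.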
  return (PointerToLowMemUInt32(method) >> 3) % kHashSize;
}

}  // namespace art