/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "profiler.h"

#include <sys/file.h>
#include <sys/stat.h>
#include <sys/uio.h>

#include <fstream>

#include "art_method-inl.h"
#include "base/stl_util.h"
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "instrumentation.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"

#include "entrypoints/quick/quick_entrypoints.h"

namespace art {

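// There is at most one profiler instance and one sampling thread at a time; these
// statics track the live instance, its pthread, and the shutdown request flag.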
BackgroundMethodSamplingProfiler* BackgroundMethodSamplingProfiler::profiler_ = nullptr;
pthread_t BackgroundMethodSamplingProfiler::profiler_pthread_ = 0U;
volatile bool BackgroundMethodSamplingProfiler::shutting_down_ = false;

// TODO: this profiler runs regardless of the state of the machine. Maybe we should use the
// wakelock or something to modify the run characteristics. This can be done when we
// have some performance data after it's been used for a while.

// Walk through the methods on the Java stack, recording at most max_depth_ frames.
class BoundedStackVisitor : public StackVisitor {
 public:
  BoundedStackVisitor(std::vector<std::pair<ArtMethod*, uint32_t>>* stack,
                      Thread* thread, uint32_t max_depth)
      SHARED_REQUIRES(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        stack_(stack),
        max_depth_(max_depth),
        depth_(0) {}

  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
    ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }
    uint32_t dex_pc_ = GetDexPc();
    stack_->push_back(std::make_pair(m, dex_pc_));
    ++depth_;
    if (depth_ < max_depth_) {
      return true;
    } else {
      return false;
    }
  }

 private:
  std::vector<std::pair<ArtMethod*, uint32_t>>* stack_;
  const uint32_t max_depth_;
  uint32_t depth_;
};

// This is called from either a thread list traversal or from a checkpoint. Regardless
// of which caller, the mutator lock must be held.
static void GetSample(Thread* thread, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
  BackgroundMethodSamplingProfiler* profiler =
      reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
  const ProfilerOptions profile_options = profiler->GetProfilerOptions();
  switch (profile_options.GetProfileType()) {
    case kProfilerMethod: {
      ArtMethod* method = thread->GetCurrentMethod(nullptr);
      if ((false) && method == nullptr) {
        LOG(INFO) << "No current method available";
        std::ostringstream os;
        thread->Dump(os);
        std::string data(os.str());
        LOG(INFO) << data;
      }
      profiler->RecordMethod(method);
      break;
    }
    case kProfilerBoundedStack: {
      std::vector<InstructionLocation> stack;
      uint32_t max_depth = profile_options.GetMaxStackDepth();
      BoundedStackVisitor bounded_stack_visitor(&stack, thread, max_depth);
      bounded_stack_visitor.WalkStack();
      profiler->RecordStack(stack);
      break;
    }
    default:
      LOG(INFO) << "This profile type is not implemented.";
  }
}

// A closure that is called by the thread checkpoint code.
class SampleCheckpoint FINAL : public Closure {
 public:
  explicit SampleCheckpoint(BackgroundMethodSamplingProfiler* const profiler) :
      profiler_(profiler) {}

  void Run(Thread* thread) OVERRIDE {
    Thread* self = Thread::Current();
    if (thread == nullptr) {
      LOG(ERROR) << "Checkpoint with nullptr thread";
      return;
    }

    // Grab the mutator lock (shared access).
    ScopedObjectAccess soa(self);

    // Grab a sample.
    GetSample(thread, this->profiler_);

    // And finally tell the barrier that we're done.
    this->profiler_->GetBarrier().Pass(self);
  }

 private:
  BackgroundMethodSamplingProfiler* const profiler_;
};

bool BackgroundMethodSamplingProfiler::ShuttingDown(Thread* self) {
  MutexLock mu(self, *Locks::profiler_lock_);
  return shutting_down_;
}

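// Main loop of the background sampling thread: sleep for the (backed-off) period,
// then repeatedly run the sampling checkpoint on runnable threads for the configured
// duration, and finally write out the collected profile.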
void* BackgroundMethodSamplingProfiler::RunProfilerThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  BackgroundMethodSamplingProfiler* profiler =
      reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);

  // Add a random delay for the first time run so that we don't hammer the CPU
  // with all profiles running at the same time.
  const int kRandomDelayMaxSecs = 30;
  const double kMaxBackoffSecs = 24*60*60;  // Max backoff time.

  srand(MicroTime() * getpid());
  int startup_delay = rand() % kRandomDelayMaxSecs;  // random delay for startup.


  CHECK(runtime->AttachCurrentThread("Profiler", true, runtime->GetSystemThreadGroup(),
                                     !runtime->IsAotCompiler()));

  Thread* self = Thread::Current();

  double backoff = 1.0;
  while (true) {
    if (ShuttingDown(self)) {
      break;
    }

    {
      // Wait until we need to run another profile.
      uint64_t delay_secs = profiler->options_.GetPeriodS() * backoff;

      // Add a startup delay to prevent all the profiles running at once.
      delay_secs += startup_delay;

      // Immediate startup for benchmarking?
      if (profiler->options_.GetStartImmediately() && startup_delay > 0) {
        delay_secs = 0;
      }

      startup_delay = 0;

      VLOG(profiler) << "Delaying profile start for " << delay_secs << " secs";
      MutexLock mu(self, profiler->wait_lock_);
      profiler->period_condition_.TimedWait(self, delay_secs * 1000, 0);
      // We were either signaled by Stop or we timed out; in either case, ignore the timed-out result.

      // Expand the backoff by its coefficient, but don't go beyond the max.
      backoff = std::min(backoff * profiler->options_.GetBackoffCoefficient(), kMaxBackoffSecs);
    }

    if (ShuttingDown(self)) {
      break;
    }


    uint64_t start_us = MicroTime();
    uint64_t end_us = start_us + profiler->options_.GetDurationS() * UINT64_C(1000000);
    uint64_t now_us = start_us;

    VLOG(profiler) << "Starting profiling run now for "
                   << PrettyDuration((end_us - start_us) * 1000);

    SampleCheckpoint check_point(profiler);

    size_t valid_samples = 0;
    while (now_us < end_us) {
      if (ShuttingDown(self)) {
        break;
      }

      usleep(profiler->options_.GetIntervalUs());  // Non-interruptible sleep.

      ThreadList* thread_list = runtime->GetThreadList();

      profiler->profiler_barrier_->Init(self, 0);
      size_t barrier_count = thread_list->RunCheckpointOnRunnableThreads(&check_point);

      // All threads are suspended, nothing to do.
      if (barrier_count == 0) {
        now_us = MicroTime();
        continue;
      }

      valid_samples += barrier_count;

      ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);

      // Wait for the barrier to be crossed by all runnable threads. This wait
      // is done with a timeout so that we can detect problems with the checkpoint
      // running code. We should never see this.
      const uint32_t kWaitTimeoutMs = 10000;

      // Wait for all threads to pass the barrier.
      bool timed_out = profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);

      // We should never get a timeout. If we do, it suggests a problem with the checkpoint
      // code. Crash the process in this case.
      CHECK(!timed_out);

      // Update the current time.
      now_us = MicroTime();
    }

    if (valid_samples > 0) {
      // After the profile has been taken, write it out.
      ScopedObjectAccess soa(self);  // Acquire the mutator lock.
      uint32_t size = profiler->WriteProfile();
      VLOG(profiler) << "Profile size: " << size;
    }
  }

  LOG(INFO) << "Profiler shutdown";
  runtime->DetachCurrentThread();
  return nullptr;
}

// Write out the profile file if we are generating a profile.
uint32_t BackgroundMethodSamplingProfiler::WriteProfile() {
  std::string full_name = output_filename_;
  VLOG(profiler) << "Saving profile to " << full_name;

  int fd = open(full_name.c_str(), O_RDWR);
  if (fd < 0) {
    // Open failed.
    LOG(ERROR) << "Failed to open profile file " << full_name;
    return 0;
  }

  // Lock the file for exclusive access. This will block if another process is using
  // the file.
  int err = flock(fd, LOCK_EX);
  if (err < 0) {
    LOG(ERROR) << "Failed to lock profile file " << full_name;
    return 0;
  }

  // Read the previous profile.
  profile_table_.ReadPrevious(fd, options_.GetProfileType());

  // Move back to the start of the file.
  lseek(fd, 0, SEEK_SET);

  // Format the profile output and write to the file.
  std::ostringstream os;
  uint32_t num_methods = DumpProfile(os);
  std::string data(os.str());
  const char *p = data.c_str();
  size_t length = data.length();
  size_t full_length = length;
  do {
    ssize_t n = ::write(fd, p, length);
    if (n < 0) {
      // A failed write would otherwise loop forever; report it and bail out.
      LOG(ERROR) << "Failed to write profile file " << full_name;
      break;
    }
    p += n;
    length -= n;
  } while (length > 0);

  // Truncate the file to the new length.
  if (ftruncate(fd, full_length) == -1) {
    LOG(ERROR) << "Failed to truncate profile file " << full_name;
  }

  // Now unlock the file, allowing another process in.
  err = flock(fd, LOCK_UN);
  if (err < 0) {
    LOG(ERROR) << "Failed to unlock profile file " << full_name;
  }

  // Done, close the file.
  ::close(fd);

  // Clean the profile for the next time.
  CleanProfile();

  return num_methods;
}

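// Start the background profiler thread if profiling is enabled in the given options.
// Returns true if a profiler is running on return (including the case where one was
// already started), and false if profiling is disabled.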
bool BackgroundMethodSamplingProfiler::Start(
    const std::string& output_filename, const ProfilerOptions& options) {
  if (!options.IsEnabled()) {
    return false;
  }

  CHECK(!output_filename.empty());

  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::profiler_lock_);
    // Don't start two profiler threads.
    if (profiler_ != nullptr) {
      return true;
    }
  }

  LOG(INFO) << "Starting profiler using output file: " << output_filename
            << " and options: " << options;
  {
    MutexLock mu(self, *Locks::profiler_lock_);
    profiler_ = new BackgroundMethodSamplingProfiler(output_filename, options);

    CHECK_PTHREAD_CALL(pthread_create, (&profiler_pthread_, nullptr, &RunProfilerThread,
        reinterpret_cast<void*>(profiler_)),
        "Profiler thread");
  }
  return true;
}



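// Stop the profiler: flag shutdown, wake the sampling thread if it is waiting,
// join it, and delete the profiler instance.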
void BackgroundMethodSamplingProfiler::Stop() {
  BackgroundMethodSamplingProfiler* profiler = nullptr;
  pthread_t profiler_pthread = 0U;
  {
    MutexLock trace_mu(Thread::Current(), *Locks::profiler_lock_);
    CHECK(!shutting_down_);
    profiler = profiler_;
    shutting_down_ = true;
    profiler_pthread = profiler_pthread_;
  }

  // Now wake up the sampler thread if it is sleeping.
  {
    MutexLock profile_mu(Thread::Current(), profiler->wait_lock_);
    profiler->period_condition_.Signal(Thread::Current());
  }
  // Wait for the sampler thread to stop.
  CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profiler thread shutdown");

  {
    MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
    profiler_ = nullptr;
  }
  delete profiler;
}


void BackgroundMethodSamplingProfiler::Shutdown() {
  Stop();
}

BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler(
    const std::string& output_filename, const ProfilerOptions& options)
    : output_filename_(output_filename),
      options_(options),
      wait_lock_("Profile wait lock"),
      period_condition_("Profile condition", wait_lock_),
      profile_table_(wait_lock_),
      profiler_barrier_(new Barrier(0)) {
  // Populate the filtered_methods set.
  // This is empty right now, but to add a method, do this:
  //
  // filtered_methods_.insert("void java.lang.Object.wait(long, int)");
}

// Filter out methods the profiler doesn't want to record.
// We require the mutator lock since some statistics will be updated here.
bool BackgroundMethodSamplingProfiler::ProcessMethod(ArtMethod* method) {
  if (method == nullptr) {
    profile_table_.NullMethod();
    // Don't record a null method.
    return false;
  }

  mirror::Class* cls = method->GetDeclaringClass();
  if (cls != nullptr) {
    if (cls->GetClassLoader() == nullptr) {
      // Don't include methods loaded by the boot class loader.
      profile_table_.BootMethod();
      return false;
    }
  }

  bool is_filtered = false;

  if (strcmp(method->GetName(), "<clinit>") == 0) {
    // Always filter out class initializers.
    is_filtered = true;
  }

  // Filter out methods by name if there are any.
  if (!is_filtered && filtered_methods_.size() > 0) {
    std::string method_full_name = PrettyMethod(method);

    // Don't include specific filtered methods.
    is_filtered = filtered_methods_.count(method_full_name) != 0;
  }
  return !is_filtered;
}

// A method has been hit, record its invocation in the method map.
// The mutator_lock must be held (shared) when this is called.
void BackgroundMethodSamplingProfiler::RecordMethod(ArtMethod* method) {
  // Add to the profile table unless it is filtered out.
  if (ProcessMethod(method)) {
    profile_table_.Put(method);
  }
}

// Record the current bounded stack into sampling results.
void BackgroundMethodSamplingProfiler::RecordStack(const std::vector<InstructionLocation>& stack) {
  if (stack.size() == 0) {
    return;
  }
  // Get the method on top of the stack. We use this method to perform filtering.
  ArtMethod* method = stack.front().first;
  if (ProcessMethod(method)) {
    profile_table_.PutStack(stack);
  }
}

// Clean out any recordings for the method traces.
void BackgroundMethodSamplingProfiler::CleanProfile() {
  profile_table_.Clear();
}

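// Dump the profile table for the configured profile type to the given stream.
// Returns the number of methods written.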
uint32_t BackgroundMethodSamplingProfiler::DumpProfile(std::ostream& os) {
  return profile_table_.Write(os, options_.GetProfileType());
}

// Profile Table.
// This holds a mapping of ArtMethod* to a count of how many times a sample
// hit it at the top of the stack.
ProfileSampleResults::ProfileSampleResults(Mutex& lock)
    : lock_(lock),
      num_samples_(0U),
      num_null_methods_(0U),
      num_boot_methods_(0U),
      previous_num_samples_(0U),
      previous_num_null_methods_(0U),
      previous_num_boot_methods_(0U) {
  for (int i = 0; i < kHashSize; i++) {
    table[i] = nullptr;
  }
  method_context_table = nullptr;
  stack_trie_root_ = nullptr;
}

ProfileSampleResults::~ProfileSampleResults() {
  Clear();
}

// Add a method to the profile table. If it's the first time the method
// has been seen, add it with count=1, otherwise increment the count.
void ProfileSampleResults::Put(ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  uint32_t index = Hash(method);
  if (table[index] == nullptr) {
    table[index] = new Map();
  }
  Map::iterator i = table[index]->find(method);
  if (i == table[index]->end()) {
    (*table[index])[method] = 1;
  } else {
    i->second++;
  }
  num_samples_++;
}

// Add a bounded stack to the profile table. Only the count of the method on
// top of the frame will be increased.
void ProfileSampleResults::PutStack(const std::vector<InstructionLocation>& stack) {
  MutexLock mu(Thread::Current(), lock_);
  ScopedObjectAccess soa(Thread::Current());
  if (stack_trie_root_ == nullptr) {
    // The root of the stack trie is a dummy node so that we don't have to maintain
    // a collection of tries.
    stack_trie_root_ = new StackTrieNode();
  }

  StackTrieNode* current = stack_trie_root_;
  if (stack.size() == 0) {
    current->IncreaseCount();
    return;
  }

  for (std::vector<InstructionLocation>::const_reverse_iterator iter = stack.rbegin();
       iter != stack.rend(); ++iter) {
    InstructionLocation inst_loc = *iter;
    ArtMethod* method = inst_loc.first;
    if (method == nullptr) {
      // Skip null methods.
      continue;
    }
    uint32_t dex_pc = inst_loc.second;
    uint32_t method_idx = method->GetDexMethodIndex();
    const DexFile* dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    MethodReference method_ref(dex_file, method_idx);
    StackTrieNode* child = current->FindChild(method_ref, dex_pc);
    if (child != nullptr) {
      current = child;
    } else {
      uint32_t method_size = 0;
      const DexFile::CodeItem* codeitem = method->GetCodeItem();
      if (codeitem != nullptr) {
        method_size = codeitem->insns_size_in_code_units_;
      }
      StackTrieNode* new_node = new StackTrieNode(method_ref, dex_pc, method_size, current);
      current->AppendChild(new_node);
      current = new_node;
    }
  }

  if (current != stack_trie_root_ && current->GetCount() == 0) {
    // Insert into the method_context table.
    if (method_context_table == nullptr) {
      method_context_table = new MethodContextMap();
    }
    MethodReference method = current->GetMethod();
    MethodContextMap::iterator i = method_context_table->find(method);
    if (i == method_context_table->end()) {
      TrieNodeSet* node_set = new TrieNodeSet();
      node_set->insert(current);
      (*method_context_table)[method] = node_set;
    } else {
      TrieNodeSet* node_set = i->second;
      node_set->insert(current);
    }
  }
  current->IncreaseCount();
  num_samples_++;
}

// Write the profile table to the output stream. Also merge with the previous profile.
uint32_t ProfileSampleResults::Write(std::ostream& os, ProfileDataType type) {
  ScopedObjectAccess soa(Thread::Current());
  num_samples_ += previous_num_samples_;
  num_null_methods_ += previous_num_null_methods_;
  num_boot_methods_ += previous_num_boot_methods_;

  VLOG(profiler) << "Profile: "
                 << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_;
  os << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_ << "\n";
  uint32_t num_methods = 0;
  if (type == kProfilerMethod) {
    for (int i = 0 ; i < kHashSize; i++) {
      Map *map = table[i];
      if (map != nullptr) {
        for (const auto &meth_iter : *map) {
          ArtMethod *method = meth_iter.first;
          std::string method_name = PrettyMethod(method);

          const DexFile::CodeItem* codeitem = method->GetCodeItem();
          uint32_t method_size = 0;
          if (codeitem != nullptr) {
            method_size = codeitem->insns_size_in_code_units_;
          }
          uint32_t count = meth_iter.second;

          // Merge this profile entry with one from a previous run (if present). Also
          // remove the previous entry.
          PreviousProfile::iterator pi = previous_.find(method_name);
          if (pi != previous_.end()) {
            count += pi->second.count_;
            previous_.erase(pi);
          }
          os << StringPrintf("%s/%u/%u\n", method_name.c_str(), count, method_size);
          ++num_methods;
        }
      }
    }
  } else if (type == kProfilerBoundedStack) {
    if (method_context_table != nullptr) {
      for (const auto &method_iter : *method_context_table) {
        MethodReference method = method_iter.first;
        TrieNodeSet* node_set = method_iter.second;
        std::string method_name = PrettyMethod(method.dex_method_index, *(method.dex_file));
        uint32_t method_size = 0;
        uint32_t total_count = 0;
        PreviousContextMap new_context_map;
        for (const auto &trie_node_i : *node_set) {
          StackTrieNode* node = trie_node_i;
          method_size = node->GetMethodSize();
          uint32_t count = node->GetCount();
          uint32_t dexpc = node->GetDexPC();
          total_count += count;

          StackTrieNode* current = node->GetParent();
          // We go backward on the trie to retrieve context and dex_pc until the dummy root.
          // The format of the context is "method_1@pc_1@method_2@pc_2@..."
          std::vector<std::string> context_vector;
          while (current != nullptr && current->GetParent() != nullptr) {
            context_vector.push_back(StringPrintf("%s@%u",
                PrettyMethod(current->GetMethod().dex_method_index, *(current->GetMethod().dex_file)).c_str(),
                current->GetDexPC()));
            current = current->GetParent();
          }
          std::string context_sig = Join(context_vector, '@');
          new_context_map[std::make_pair(dexpc, context_sig)] = count;
        }

        PreviousProfile::iterator pi = previous_.find(method_name);
        if (pi != previous_.end()) {
          total_count += pi->second.count_;
          PreviousContextMap* previous_context_map = pi->second.context_map_;
          if (previous_context_map != nullptr) {
            for (const auto &context_i : *previous_context_map) {
              uint32_t count = context_i.second;
              PreviousContextMap::iterator ci = new_context_map.find(context_i.first);
              if (ci == new_context_map.end()) {
                new_context_map[context_i.first] = count;
              } else {
                ci->second += count;
              }
            }
          }
          delete previous_context_map;
          previous_.erase(pi);
        }
        // We write out profile data with dex pc and context information in the following format:
        // "method/total_count/size/[pc_1:count_1:context_1#pc_2:count_2:context_2#...]".
        std::vector<std::string> context_count_vector;
        for (const auto &context_i : new_context_map) {
          context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
              context_i.second, context_i.first.second.c_str()));
        }
        os << StringPrintf("%s/%u/%u/[%s]\n", method_name.c_str(), total_count,
            method_size, Join(context_count_vector, '#').c_str());
        ++num_methods;
      }
    }
  }

  // Now we write out the remaining previous methods.
  for (const auto &pi : previous_) {
    if (type == kProfilerMethod) {
      os << StringPrintf("%s/%u/%u\n", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
    } else if (type == kProfilerBoundedStack) {
      os << StringPrintf("%s/%u/%u/[", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
      PreviousContextMap* previous_context_map = pi.second.context_map_;
      if (previous_context_map != nullptr) {
        std::vector<std::string> context_count_vector;
        for (const auto &context_i : *previous_context_map) {
          context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
              context_i.second, context_i.first.second.c_str()));
        }
        os << Join(context_count_vector, '#');
      }
      os << "]\n";
    }
    ++num_methods;
  }
  return num_methods;
}

void ProfileSampleResults::Clear() {
  num_samples_ = 0;
  num_null_methods_ = 0;
  num_boot_methods_ = 0;
  for (int i = 0; i < kHashSize; i++) {
    delete table[i];
    table[i] = nullptr;
  }
  if (stack_trie_root_ != nullptr) {
    stack_trie_root_->DeleteChildren();
    delete stack_trie_root_;
    stack_trie_root_ = nullptr;
    if (method_context_table != nullptr) {
      delete method_context_table;
      method_context_table = nullptr;
    }
  }
  for (auto &pi : previous_) {
    if (pi.second.context_map_ != nullptr) {
      delete pi.second.context_map_;
      pi.second.context_map_ = nullptr;
    }
  }
  previous_.clear();
}

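// Hash an ArtMethod pointer into one of the kHashSize buckets, ignoring the
// low-order alignment bits.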
uint32_t ProfileSampleResults::Hash(ArtMethod* method) {
  return (PointerToLowMemUInt32(method) >> 3) % kHashSize;
}

// Read a single line into the given string. Returns true if everything OK, false
// on EOF or error.
static bool ReadProfileLine(int fd, std::string& line) {
  char buf[4];
  line.clear();
  while (true) {
    int n = read(fd, buf, 1);  // TODO: could speed this up but is it worth it?
    if (n != 1) {
      return false;
    }
    if (buf[0] == '\n') {
      break;
    }
    line += buf[0];
  }
  return true;
}

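// Parse a profile previously written by Write() from the given file descriptor into
// previous_, so that the next Write() can merge the old counts with the new samples.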
void ProfileSampleResults::ReadPrevious(int fd, ProfileDataType type) {
  // Reset counters.
  previous_num_samples_ = previous_num_null_methods_ = previous_num_boot_methods_ = 0;

  std::string line;

  // The first line contains summary information.
  if (!ReadProfileLine(fd, line)) {
    return;
  }
  std::vector<std::string> summary_info;
  Split(line, '/', &summary_info);
  if (summary_info.size() != 3) {
    // Bad summary info. It should be count/nullcount/bootcount.
    return;
  }
  previous_num_samples_ = strtoul(summary_info[0].c_str(), nullptr, 10);
  previous_num_null_methods_ = strtoul(summary_info[1].c_str(), nullptr, 10);
  previous_num_boot_methods_ = strtoul(summary_info[2].c_str(), nullptr, 10);

  // Now read each line until the end of file. Each line consists of 3 or 4 fields separated by '/'.
  while (true) {
    if (!ReadProfileLine(fd, line)) {
      break;
    }
    std::vector<std::string> info;
    Split(line, '/', &info);
    if (info.size() != 3 && info.size() != 4) {
      // Malformed.
      break;
    }
    std::string methodname = info[0];
    uint32_t total_count = strtoul(info[1].c_str(), nullptr, 10);
    uint32_t size = strtoul(info[2].c_str(), nullptr, 10);
    PreviousContextMap* context_map = nullptr;
    if (type == kProfilerBoundedStack && info.size() == 4) {
      context_map = new PreviousContextMap();
      std::string context_counts_str = info[3].substr(1, info[3].size() - 2);
      std::vector<std::string> context_count_pairs;
      Split(context_counts_str, '#', &context_count_pairs);
      for (uint32_t i = 0; i < context_count_pairs.size(); ++i) {
        std::vector<std::string> context_count;
        Split(context_count_pairs[i], ':', &context_count);
        if (context_count.size() == 2) {
          // Handles the situation when the profile file doesn't contain context information.
          uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
          uint32_t count = strtoul(context_count[1].c_str(), nullptr, 10);
          (*context_map)[std::make_pair(dexpc, "")] = count;
        } else {
          // Handles the situation when the profile file contains context information.
          uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
          uint32_t count = strtoul(context_count[1].c_str(), nullptr, 10);
          std::string context = context_count[2];
          (*context_map)[std::make_pair(dexpc, context)] = count;
        }
      }
    }
    previous_[methodname] = PreviousValue(total_count, size, context_map);
  }
}

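// Load a profile file into profile_map_, computing for each method its used
// percentage and the top-K percentage bucket it falls into. Returns false if the
// file is missing, empty, or malformed.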
bool ProfileFile::LoadFile(const std::string& fileName) {
  LOG(VERBOSE) << "reading profile file " << fileName;
  struct stat st;
  int err = stat(fileName.c_str(), &st);
  if (err == -1) {
    LOG(VERBOSE) << "not found";
    return false;
  }
  if (st.st_size == 0) {
    return false;  // Empty profiles are invalid.
  }
  std::ifstream in(fileName.c_str());
  if (!in) {
    LOG(VERBOSE) << "profile file " << fileName << " exists but can't be opened";
    LOG(VERBOSE) << "file owner: " << st.st_uid << ":" << st.st_gid;
    LOG(VERBOSE) << "me: " << getuid() << ":" << getgid();
    LOG(VERBOSE) << "file permissions: " << std::oct << st.st_mode;
    LOG(VERBOSE) << "errno: " << errno;
    return false;
  }
  // The first line contains summary information.
  std::string line;
  std::getline(in, line);
  if (in.eof()) {
    return false;
  }
  std::vector<std::string> summary_info;
  Split(line, '/', &summary_info);
  if (summary_info.size() != 3) {
    // Bad summary info. It should be total/null/boot.
    return false;
  }
  // This is the number of hits in all profiled methods (without null or boot methods).
  uint32_t total_count = strtoul(summary_info[0].c_str(), nullptr, 10);

  // Now read each line until the end of file. Each line consists of 3 fields separated by '/'.
  // Store the info in descending order given by the most used methods.
  typedef std::set<std::pair<int, std::vector<std::string>>> ProfileSet;
  ProfileSet countSet;
  while (!in.eof()) {
    std::getline(in, line);
    if (in.eof()) {
      break;
    }
    std::vector<std::string> info;
    Split(line, '/', &info);
    if (info.size() != 3 && info.size() != 4) {
      // Malformed.
      return false;
    }
    int count = atoi(info[1].c_str());
    countSet.insert(std::make_pair(-count, info));
  }

  uint32_t curTotalCount = 0;
  ProfileSet::iterator end = countSet.end();
  const ProfileData* prevData = nullptr;
  for (ProfileSet::iterator it = countSet.begin(); it != end ; it++) {
    const std::string& methodname = it->second[0];
    uint32_t count = -it->first;
    uint32_t size = strtoul(it->second[2].c_str(), nullptr, 10);
    double usedPercent = (count * 100.0) / total_count;

    curTotalCount += count;
    // Methods with the same count should be part of the same top K percentage bucket.
    double topKPercentage = (prevData != nullptr) && (prevData->GetCount() == count)
      ? prevData->GetTopKUsedPercentage()
      : 100 * static_cast<double>(curTotalCount) / static_cast<double>(total_count);

    // Add it to the profile map.
    ProfileData curData = ProfileData(methodname, count, size, usedPercent, topKPercentage);
    profile_map_[methodname] = curData;
    prevData = &curData;
  }
  return true;
}

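// Look up the profile data recorded for method_name. Returns false if the method
// is not present in the loaded profile.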
bool ProfileFile::GetProfileData(ProfileFile::ProfileData* data, const std::string& method_name) {
  ProfileMap::iterator i = profile_map_.find(method_name);
  if (i == profile_map_.end()) {
    return false;
  }
  *data = i->second;
  return true;
}

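// Collect into topKSamples the names of all methods whose top-K used percentage is
// below the given threshold.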
bool ProfileFile::GetTopKSamples(std::set<std::string>& topKSamples, double topKPercentage) {
  ProfileMap::iterator end = profile_map_.end();
  for (ProfileMap::iterator it = profile_map_.begin(); it != end; it++) {
    if (it->second.GetTopKUsedPercentage() < topKPercentage) {
      topKSamples.insert(it->first);
    }
  }
  return true;
}

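// Find the child node matching (method, dex_pc), or return nullptr if there is none.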
StackTrieNode* StackTrieNode::FindChild(MethodReference method, uint32_t dex_pc) {
  if (children_.size() == 0) {
    return nullptr;
  }
  // Create a dummy node for searching.
  StackTrieNode* node = new StackTrieNode(method, dex_pc, 0, nullptr);
  std::set<StackTrieNode*, StackTrieNodeComparator>::iterator i = children_.find(node);
  delete node;
  return (i == children_.end()) ? nullptr : *i;
}

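// Recursively delete all descendants of this node; the node itself is left for the
// caller to delete.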
void StackTrieNode::DeleteChildren() {
  for (auto &child : children_) {
    if (child != nullptr) {
      child->DeleteChildren();
      delete child;
    }
  }
}

}  // namespace art