Elliott Hughes | 2faa5f1 | 2012-01-30 14:42:07 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2011 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
jeffhao | e343b76 | 2011-12-05 16:36:44 -0800 | [diff] [blame] | 16 | |
Brian Carlstrom | fc0e321 | 2013-07-17 14:40:12 -0700 | [diff] [blame] | 17 | #ifndef ART_RUNTIME_TRACE_H_ |
| 18 | #define ART_RUNTIME_TRACE_H_ |
jeffhao | e343b76 | 2011-12-05 16:36:44 -0800 | [diff] [blame] | 19 | |
Andreas Gampe | 40da286 | 2015-02-27 12:49:04 -0800 | [diff] [blame] | 20 | #include <bitset> |
| 21 | #include <map> |
Ian Rogers | 700a402 | 2014-05-19 16:49:03 -0700 | [diff] [blame] | 22 | #include <memory> |
jeffhao | a9ef3fd | 2011-12-13 18:33:43 -0800 | [diff] [blame] | 23 | #include <ostream> |
| 24 | #include <set> |
| 25 | #include <string> |
Mathieu Chartier | 4d64cd4 | 2015-06-02 16:38:29 -0700 | [diff] [blame] | 26 | #include <unordered_map> |
Jeff Hao | 0abc72e | 2013-08-13 13:45:14 -0700 | [diff] [blame] | 27 | #include <vector> |
jeffhao | e343b76 | 2011-12-05 16:36:44 -0800 | [diff] [blame] | 28 | |
David Sehr | c431b9d | 2018-03-02 12:01:51 -0800 | [diff] [blame] | 29 | #include "base/atomic.h" |
David Sehr | 1979c64 | 2018-04-26 14:41:18 -0700 | [diff] [blame] | 30 | #include "base/globals.h" |
Elliott Hughes | 7616005 | 2012-12-12 16:31:20 -0800 | [diff] [blame] | 31 | #include "base/macros.h" |
David Sehr | c431b9d | 2018-03-02 12:01:51 -0800 | [diff] [blame] | 32 | #include "base/os.h" |
David Sehr | 67bf42e | 2018-02-26 16:43:04 -0800 | [diff] [blame] | 33 | #include "base/safe_map.h" |
Ian Rogers | 62d6c77 | 2013-02-27 08:32:07 -0800 | [diff] [blame] | 34 | #include "instrumentation.h" |
jeffhao | e343b76 | 2011-12-05 16:36:44 -0800 | [diff] [blame] | 35 | |
namespace unix_file {
class FdFile;  // Forward declaration; used by Trace::Start(std::unique_ptr<FdFile>&&, ...).
}  // namespace unix_file
| 39 | |
jeffhao | e343b76 | 2011-12-05 16:36:44 -0800 | [diff] [blame] | 40 | namespace art { |
| 41 | |
// Forward declarations to avoid pulling in heavyweight runtime headers.
class ArtField;
class ArtMethod;
class DexFile;
class ShadowFrame;
class Thread;
jeffhao | e343b76 | 2011-12-05 16:36:44 -0800 | [diff] [blame] | 47 | |
Andreas Gampe | 40da286 | 2015-02-27 12:49:04 -0800 | [diff] [blame] | 48 | using DexIndexBitSet = std::bitset<65536>; |
Alex Light | a344f6a | 2016-07-20 10:43:39 -0700 | [diff] [blame] | 49 | |
| 50 | constexpr size_t kMaxThreadIdNumber = kIsTargetBuild ? 65536U : 1048576U; |
| 51 | using ThreadIDBitSet = std::bitset<kMaxThreadIdNumber>; |
Andreas Gampe | 40da286 | 2015-02-27 12:49:04 -0800 | [diff] [blame] | 52 | |
// Global tracing state, as reported by Trace::GetMethodTracingMode().
enum TracingMode {
  kTracingInactive,
  kMethodTracingActive,    // Trace activity synchronous with method progress.
  kSampleProfilingActive,  // Trace activity captured by sampling thread.
};
std::ostream& operator<<(std::ostream& os, const TracingMode& rhs);
Jeff Hao | 64caa7d | 2013-08-29 11:18:01 -0700 | [diff] [blame] | 59 | |
// File format:
//     header
//     record 0
//     record 1
//     ...
//
// Header format:
//     u4  magic ('SLOW')
//     u2  version
//     u2  offset to data
//     u8  start date/time in usec
//     u2  record size in bytes (version >= 2 only)
//     ... padding to 32 bytes
//
// Record format v1:
//     u1  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//
// Record format v2:
//     u2  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//
// Record format v3:
//     u2  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//     u4  wall time since start, in usec (when clock == "dual" only)
//
// 32 bits of microseconds is 70 minutes.
//
// All values are stored in little-endian order.
| 93 | |
// Trace action, stored in the two low-order bits of the method word of a
// trace record; the remaining bits hold the method id (see the file format
// description above).
enum TraceAction {
  kTraceMethodEnter = 0x00,       // method entry
  kTraceMethodExit = 0x01,        // method exit
  kTraceUnroll = 0x02,            // method exited by exception unrolling
  // 0x03 currently unused
  kTraceMethodActionMask = 0x03,  // two bits
};
| 101 | |
Orion Hodson | 283ad2d | 2018-03-26 13:37:41 +0100 | [diff] [blame] | 102 | // Class for recording event traces. Trace data is either collected |
| 103 | // synchronously during execution (TracingMode::kMethodTracingActive), |
| 104 | // or by a separate sampling thread (TracingMode::kSampleProfilingActive). |
// Class for recording event traces. Trace data is either collected
// synchronously during execution (TracingMode::kMethodTracingActive),
// or by a separate sampling thread (TracingMode::kSampleProfilingActive).
class Trace FINAL : public instrumentation::InstrumentationListener {
 public:
  // Extra-tracing flags, passed to Start().
  enum TraceFlag {
    kTraceCountAllocs = 1,  // Also record allocation counts.
  };

  // Where trace output is delivered.
  enum class TraceOutputMode {
    kFile,       // Output to a file.
    kDDMS,       // Output via DDMS.
    kStreaming   // Stream records to the output as they are produced.
  };

  // How trace events are produced.
  enum class TraceMode {
    kMethodTracing,  // Record method entry/exit synchronously with execution.
    kSampling        // Record stack samples from a dedicated sampling thread.
  };

  ~Trace();

  // Set the clock source (wall and/or thread-CPU) used by subsequently
  // started traces.
  static void SetDefaultClockSource(TraceClockSource clock_source);

  // Start tracing, writing output to the named file.
  static void Start(const char* trace_filename,
                    size_t buffer_size,
                    int flags,
                    TraceOutputMode output_mode,
                    TraceMode trace_mode,
                    int interval_us)
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
               !Locks::trace_lock_);
  // Start tracing, writing output to an already-open file descriptor.
  static void Start(int trace_fd,
                    size_t buffer_size,
                    int flags,
                    TraceOutputMode output_mode,
                    TraceMode trace_mode,
                    int interval_us)
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
               !Locks::trace_lock_);
  // Start tracing, taking ownership of an already-open file.
  static void Start(std::unique_ptr<unix_file::FdFile>&& file,
                    size_t buffer_size,
                    int flags,
                    TraceOutputMode output_mode,
                    TraceMode trace_mode,
                    int interval_us)
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
               !Locks::trace_lock_);
  // Start tracing with output going to DDMS (no file arguments needed).
  static void StartDDMS(size_t buffer_size,
                        int flags,
                        TraceMode trace_mode,
                        int interval_us)
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
               !Locks::trace_lock_);

  // Temporarily pause / resume an active trace.
  static void Pause() REQUIRES(!Locks::trace_lock_, !Locks::thread_list_lock_);
  static void Resume() REQUIRES(!Locks::trace_lock_);

  // Stop tracing. This will finish the trace and write it to file/send it via DDMS.
  static void Stop()
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
  // Abort tracing. This will just stop tracing and *not* write/send the collected data.
  static void Abort()
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
  static void Shutdown()
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
  // Returns the current global tracing state (inactive / method / sampling).
  static TracingMode GetMethodTracingMode() REQUIRES(!Locks::trace_lock_);

  // Clock-source queries and clock-overhead measurement for this trace.
  bool UseWallClock();
  bool UseThreadCpuClock();
  void MeasureClockOverhead();
  uint32_t GetClockOverheadNanoSeconds();

  // Compare the given sampled stack trace against the thread's previous one
  // and record events for the differences (sampling mode); see also
  // AllocStackTrace()/FreeStackTrace() below.
  void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);

  // InstrumentationListener implementation.
  void MethodEntered(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
      OVERRIDE;
  void MethodExited(Thread* thread,
                    Handle<mirror::Object> this_object,
                    ArtMethod* method,
                    uint32_t dex_pc,
                    const JValue& return_value)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
      OVERRIDE;
  void MethodUnwind(Thread* thread,
                    Handle<mirror::Object> this_object,
                    ArtMethod* method,
                    uint32_t dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
      OVERRIDE;
  void DexPcMoved(Thread* thread,
                  Handle<mirror::Object> this_object,
                  ArtMethod* method,
                  uint32_t new_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
      OVERRIDE;
  void FieldRead(Thread* thread,
                 Handle<mirror::Object> this_object,
                 ArtMethod* method,
                 uint32_t dex_pc,
                 ArtField* field)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
  void FieldWritten(Thread* thread,
                    Handle<mirror::Object> this_object,
                    ArtMethod* method,
                    uint32_t dex_pc,
                    ArtField* field,
                    const JValue& field_value)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
  void ExceptionThrown(Thread* thread,
                       Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
  void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
  void Branch(Thread* thread,
              ArtMethod* method,
              uint32_t dex_pc,
              int32_t dex_pc_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
  void InvokeVirtualOrInterface(Thread* thread,
                                Handle<mirror::Object> this_object,
                                ArtMethod* caller,
                                uint32_t dex_pc,
                                ArtMethod* callee)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
  void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
      REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
  // Reuse an old stack trace if it exists, otherwise allocate a new one.
  static std::vector<ArtMethod*>* AllocStackTrace();
  // Clear and store an old stack trace for later use.
  static void FreeStackTrace(std::vector<ArtMethod*>* stack_trace);
  // Save id and name of a thread before it exits.
  static void StoreExitingThreadInfo(Thread* thread);

  // Accessors for the configuration of the currently active trace.
  static TraceOutputMode GetOutputMode() REQUIRES(!Locks::trace_lock_);
  static TraceMode GetMode() REQUIRES(!Locks::trace_lock_);
  static size_t GetBufferSize() REQUIRES(!Locks::trace_lock_);

  // Used by class linker to prevent class unloading.
  static bool IsTracingEnabled() REQUIRES(!Locks::trace_lock_);

 private:
  // Private: instances are created only through the static Start*() methods.
  Trace(File* trace_file,
        size_t buffer_size,
        int flags,
        TraceOutputMode output_mode,
        TraceMode trace_mode);

  // Entry point of the sampling thread (sampling mode only).
  // The sampling interval in microseconds is passed as an argument.
  static void* RunSamplingThread(void* arg) REQUIRES(!Locks::trace_lock_);

  // Common implementation behind Stop() and Abort().
  static void StopTracing(bool finish_tracing, bool flush_file)
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_)
      // There is an annoying issue with static functions that create a new object and call into
      // that object that causes them to not be able to tell that we don't currently hold the lock.
      // This causes the negative annotations to incorrectly have a false positive. TODO: Figure out
      // how to annotate this.
      NO_THREAD_SAFETY_ANALYSIS;
  void FinishTracing()
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);

  // Fill in the thread-CPU and wall clock deltas (per clock_source_) for a record.
  void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff);

  // Write one trace record for the given method event.
  void LogMethodTraceEvent(Thread* thread, ArtMethod* method,
                           instrumentation::Instrumentation::InstrumentationEvent event,
                           uint32_t thread_clock_diff, uint32_t wall_clock_diff)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);

  // Methods to output traced methods and threads.
  void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods)
      REQUIRES(!*unique_methods_lock_);
  void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
  void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_);

  // Methods to register seen entities in streaming mode. The methods return true if the entity
  // is newly discovered.
  bool RegisterMethod(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(streaming_lock_);
  bool RegisterThread(Thread* thread)
      REQUIRES(streaming_lock_);

  // Copy a temporary buffer to the main buffer. Used for streaming. Exposed here for lock
  // annotation.
  void WriteToBuf(const uint8_t* src, size_t src_size)
      REQUIRES(streaming_lock_);
  // Flush the main buffer to file. Used for streaming. Exposed here for lock annotation.
  void FlushBuf()
      REQUIRES(streaming_lock_);

  // Encode/decode between ArtMethod* and the compact method ids used in records.
  uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!*unique_methods_lock_);
  uint32_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action)
      REQUIRES(!*unique_methods_lock_);
  ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!*unique_methods_lock_);
  std::string GetMethodLine(ArtMethod* method) REQUIRES(!*unique_methods_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);

  // Singleton instance of the Trace or null when no method tracing is active.
  static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);

  // The default profiler clock source.
  static TraceClockSource default_clock_source_;

  // Sampling thread, non-zero when sampling.
  static pthread_t sampling_pthread_;

  // Used to remember an unused stack trace to avoid re-allocation during sampling.
  static std::unique_ptr<std::vector<ArtMethod*>> temp_stack_trace_;

  // File to write trace data out to, null if direct to ddms.
  std::unique_ptr<File> trace_file_;

  // Buffer to store trace data. In streaming mode, this is protected
  // by the streaming_lock_. In non-streaming mode, reserved regions
  // are atomically allocated (using cur_offset_) for log entries to
  // be written.
  std::unique_ptr<uint8_t[]> buf_;

  // Flags enabling extra tracing of things such as alloc counts.
  const int flags_;

  // The kind of output for this tracing.
  const TraceOutputMode trace_output_mode_;

  // The tracing method.
  const TraceMode trace_mode_;

  // Clock source configured at trace creation.
  const TraceClockSource clock_source_;

  // Size of buf_.
  const size_t buffer_size_;

  // Time trace was created.
  const uint64_t start_time_;

  // Clock overhead.
  const uint32_t clock_overhead_ns_;

  // Offset into buf_. The field is atomic to allow multiple writers
  // to concurrently reserve space in the buffer. The newly written
  // buffer contents are not read without some other form of thread
  // synchronization, such as suspending all potential writers or
  // acquiring *streaming_lock_. Reading cur_offset_ is thus never
  // used to ensure visibility of any other objects, and all accesses
  // are memory_order_relaxed.
  //
  // All accesses to buf_ in streaming mode occur whilst holding the
  // streaming lock. In streaming mode, the buffer may be written out
  // so cur_offset_ can move forwards and backwards.
  //
  // When not in streaming mode, the buf_ writes can come from
  // multiple threads when the trace mode is kMethodTracing. When
  // trace mode is kSampling, writes only come from the sampling
  // thread.
  //
  // Reads to the buffer happen after the event sources writing to the
  // buffer have been shutdown and all stores have completed. The
  // stores are made visible in StopTracing() when execution leaves
  // the ScopedSuspendAll block.
  AtomicInteger cur_offset_;

  // Did we overflow the buffer recording traces?
  bool overflow_;

  // Map of thread ids and names that have already exited.
  SafeMap<pid_t, std::string> exited_threads_;

  // Sampling profiler sampling interval.
  int interval_us_;

  // Streaming mode data.
  Mutex* streaming_lock_;
  std::map<const DexFile*, DexIndexBitSet*> seen_methods_ GUARDED_BY(streaming_lock_);
  std::unique_ptr<ThreadIDBitSet> seen_threads_ GUARDED_BY(streaming_lock_);

  // Bijective map from ArtMethod* to index.
  // Map from ArtMethod* to index in unique_methods_;
  Mutex* unique_methods_lock_ ACQUIRED_AFTER(streaming_lock_);
  std::unordered_map<ArtMethod*, uint32_t> art_method_id_map_ GUARDED_BY(unique_methods_lock_);
  std::vector<ArtMethod*> unique_methods_ GUARDED_BY(unique_methods_lock_);

  DISALLOW_COPY_AND_ASSIGN(Trace);
};
| 394 | |
| 395 | } // namespace art |
| 396 | |
Brian Carlstrom | fc0e321 | 2013-07-17 14:40:12 -0700 | [diff] [blame] | 397 | #endif // ART_RUNTIME_TRACE_H_ |