/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "dex_file_annotations.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "ti_thread.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) = default;

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::DexFile::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}
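
// A minimal usage sketch for the factory above, assuming the caller runs
// under the mutator lock (as the closures below do): collect up to 16 frames
// of 'self' into a vector. The factory exists so that the lambda's deduced
// type can be bound to the FrameFn template parameter.
//
//   std::vector<jvmtiFrameInfo> frames;
//   auto collect = [&](jvmtiFrameInfo info) { frames.push_back(info); };
//   auto visitor = MakeStackTraceVisitor(self, /* start */ 0u, /* stop */ 16u, collect);
//   visitor.WalkStack(/* include_transitions */ false);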

struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}
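
// Worked example for the bottom-up branch: with frames.size() == 5 and
// start_depth == -2, the copy starts at frames[5 + (-2)] == frames[3] and
// count == min(2, max_frame_count), so the two outermost frames are
// returned (frames[0] is the top of the stack).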

struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

static jvmtiError GetThread(JNIEnv* env,
                            art::ScopedObjectAccessAlreadyRunnable& soa,
                            jthread java_thread,
                            art::Thread** thread)
    REQUIRES_SHARED(art::Locks::mutator_lock_)  // Needed for FromManagedThread.
    REQUIRES(art::Locks::thread_list_lock_) {   // Needed for FromManagedThread.
  if (java_thread == nullptr) {
    *thread = art::Thread::Current();
    if (*thread == nullptr) {
      // The stack functions can only be run during the live phase, so the current thread
      // should be attached and thus available. Getting null for the current thread means
      // we're starting up or dying.
      return ERR(WRONG_PHASE);
    }
  } else {
    if (!env->IsInstanceOf(java_thread, art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }

    // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
    *thread = art::Thread::FromManagedThread(soa, java_thread);
    if (*thread == nullptr) {
      return ERR(THREAD_NOT_ALIVE);
    }
  }
  return ERR(NONE);
}

jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting ||
      state == art::ThreadState::kTerminated ||
      thread->IsStillStarting()) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    thread->RequestSynchronousCheckpoint(&closure);
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  }

  GetStackTraceVectorClosure closure(0, 0);
  thread->RequestSynchronousCheckpoint(&closure);

  return TranslateFrameVector(closure.frames,
                              start_depth,
                              closure.start_result,
                              max_frame_count,
                              frame_buffer,
                              count_ptr);
}
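
// Hedged agent-side sketch of reaching this entry point through the JVMTI
// interface ('jvmti' is a hypothetical jvmtiEnv*; error handling elided).
// Per the spec, a null jthread means the current thread.
//
//   jvmtiFrameInfo frames[16];
//   jint count = 0;
//   jvmtiError err = jvmti->GetStackTrace(nullptr /* current thread */,
//                                         0 /* start_depth */,
//                                         16 /* max_frame_count */,
//                                         frames,
//                                         &count);
//   // On JVMTI_ERROR_NONE, frames[0..count) hold {method, location} pairs.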

template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) OVERRIDE
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count) {
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}
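
// 'Data' is a duck-typed concept rather than an interface. A minimal sketch
// of the shape RunCheckpointAndWait expects (AllStackTracesData and
// SelectStackTracesData below both follow it):
//
//   struct ExampleData {
//     // Called from the checkpoint under the mutator lock; returns storage
//     // for 'thread', or nullptr to skip that thread. Must synchronize its
//     // internal state via 'mutex'.
//     std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self,
//                                                     art::Thread* thread);
//     art::Mutex mutex;
//   };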

jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  art::Thread* current = art::Thread::Current();

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}
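
// Hedged agent-side sketch of consuming the result. Per the JVMTI spec the
// stack infos and their frame buffers live in the single Allocate'd chunk
// built above, so one Deallocate of the returned pointer frees everything.
//
//   jvmtiStackInfo* infos = nullptr;
//   jint n = 0;
//   if (jvmti->GetAllStackTraces(16, &infos, &n) == JVMTI_ERROR_NONE) {
//     for (jint i = 0; i < n; ++i) {
//       // infos[i].thread is a local ref; infos[i].frame_buffer holds
//       // infos[i].frame_count jvmtiFrameInfo entries.
//     }
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(infos));
//   }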

jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);  // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer. Note that the stack-info array is
  // sized by thread_count, which may be larger than data.frames.size().
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
          JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
// runtime methods and transitions must not be counted.
struct GetFrameCountVisitor : public art::StackVisitor {
  explicit GetFrameCountVisitor(art::Thread* thread)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        count(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      count++;
    }
    return true;
  }

  size_t count;
};

struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetFrameCountVisitor visitor(self);
    visitor.WalkStack(false);

    count = visitor.count;
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);

  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  thread->RequestSynchronousCheckpoint(&closure);

  *count_ptr = closure.count;
  return ERR(NONE);
}

// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct GetLocationVisitor : public art::StackVisitor {
  GetLocationVisitor(art::Thread* thread, size_t n_in)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        n(n_in),
        count(0),
        caller(nullptr),
        caller_dex_pc(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      DCHECK(caller == nullptr);
      if (count == n) {
        caller = m;
        caller_dex_pc = GetDexPc(false);
        return false;
      }
      count++;
    }
    return true;
  }

  const size_t n;
  size_t count;
  art::ArtMethod* caller;
  uint32_t caller_dex_pc;
};

struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetLocationVisitor visitor(self, n);
    visitor.WalkStack(false);

    method = visitor.caller;
    dex_pc = visitor.caller_dex_pc;
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  thread->RequestSynchronousCheckpoint(&closure);

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::DexFile::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}
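
// Hedged sketch pairing the two entry points above: walk every Java frame of
// the current thread from an agent ('jvmti' is a hypothetical jvmtiEnv*).
//
//   jint count = 0;
//   if (jvmti->GetFrameCount(nullptr, &count) == JVMTI_ERROR_NONE) {
//     for (jint depth = 0; depth < count; ++depth) {
//       jmethodID method;
//       jlocation location;  // Dex pc, or -1 for native methods.
//       jvmti->GetFrameLocation(nullptr, depth, &method, &location);
//     }
//   }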

struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
  // We need a context because VisitLocks needs it to retrieve the monitor objects.
  explicit MonitorVisitor(art::Thread* thread)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      : art::StackVisitor(thread,
                          art::Context::Create(),
                          art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        hs(art::Thread::Current()),
        current_stack_depth(0) {}

  ~MonitorVisitor() {
    delete context_;
  }

  bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    if (!GetMethod()->IsRuntimeMethod()) {
      art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
      ++current_stack_depth;
    }
    return true;
  }

  static void AppendOwnedMonitors(art::mirror::Object* owned_monitor, void* arg)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
    art::ObjPtr<art::mirror::Object> mon(owned_monitor);
    // Filter out duplicates.
    for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
      if (monitor.Get() == mon.Ptr()) {
        return;
      }
    }
    visitor->monitors.push_back(visitor->hs.NewHandle(mon));
    visitor->stack_depths.push_back(visitor->current_stack_depth);
  }

  void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (const art::Handle<art::mirror::Object>& m : monitors) {
      if (m.Get() == obj) {
        return;
      }
    }
    monitors.push_back(hs.NewHandle(obj));
    stack_depths.push_back(-1);
  }

  art::VariableSizedHandleScope hs;
  jint current_stack_depth;
  std::vector<art::Handle<art::mirror::Object>> monitors;
  std::vector<jint> stack_depths;
};

template<typename Fn>
struct MonitorInfoClosure : public art::Closure {
 public:
  MonitorInfoClosure(art::ScopedObjectAccess& soa, Fn handle_results)
      : soa_(soa), err_(OK), handle_results_(handle_results) {}

  void Run(art::Thread* target) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    // Find the monitors on the stack.
    MonitorVisitor visitor(target);
    visitor.WalkStack(/* include_transitions */ false);
    // Find any other monitors, including ones acquired in native code.
    art::RootInfo root_info(art::kRootVMInternal);
    target->GetJniEnv()->monitors.VisitRoots(&visitor, root_info);
    err_ = handle_results_(soa_, visitor);
  }

  jvmtiError GetError() {
    return err_;
  }

 private:
  art::ScopedObjectAccess& soa_;
  jvmtiError err_;
  Fn handle_results_;
};

template <typename Fn>
static jvmtiError GetOwnedMonitorInfoCommon(jthread thread, Fn handle_results) {
  art::Thread* self = art::Thread::Current();
  art::ScopedObjectAccess soa(self);
  MonitorInfoClosure<Fn> closure(soa, handle_results);
  bool called_method = false;
  {
    art::MutexLock mu(self, *art::Locks::thread_list_lock_);
    art::Thread* target = ThreadUtil::GetNativeThread(thread, soa);
    if (target == nullptr && thread == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (target == nullptr) {
      return ERR(THREAD_NOT_ALIVE);
    }
    if (target != self) {
      called_method = true;
      if (!target->RequestSynchronousCheckpoint(&closure)) {
        return ERR(THREAD_NOT_ALIVE);
      }
    }
  }
  // Cannot call the closure on the current thread if we have thread_list_lock since we need to
  // call into the verifier which can cause the current thread to suspend for gc. Suspending would
  // be a bad thing to do if we hold the ThreadListLock. For other threads since we are running it
  // on a checkpoint we are fine but if the thread is the current one we need to drop the mutex
  // first.
  if (!called_method) {
    closure.Run(self);
  }
  return closure.GetError();
}

jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
                                                    jthread thread,
                                                    jint* info_cnt,
                                                    jvmtiMonitorStackDepthInfo** info_ptr) {
  if (info_cnt == nullptr || info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * visitor.monitors.size();
    jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
    if (err != OK) {
      return err;
    }
    *info_cnt = visitor.monitors.size();
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      (*info_ptr)[i] = {
        soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get()),
        visitor.stack_depths[i]
      };
    }
    return OK;
  };
  return GetOwnedMonitorInfoCommon(thread, handle_fun);
}

jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                          jthread thread,
                                          jint* owned_monitor_count_ptr,
                                          jobject** owned_monitors_ptr) {
  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto nbytes = sizeof(jobject) * visitor.monitors.size();
    jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
    if (err != OK) {
      return err;
    }
    *owned_monitor_count_ptr = visitor.monitors.size();
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      (*owned_monitors_ptr)[i] =
          soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get());
    }
    return OK;
  };
  return GetOwnedMonitorInfoCommon(thread, handle_fun);
}
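
// Hedged usage sketch for the two entry points above. Both fill buffers
// created with env->Allocate, so the agent owns them and must Deallocate.
//
//   jint n = 0;
//   jobject* monitors = nullptr;
//   if (jvmti->GetOwnedMonitorInfo(thread, &n, &monitors) == JVMTI_ERROR_NONE) {
//     // monitors[0..n) are local references to the owned monitor objects.
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(monitors));
//   }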

jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
  art::Thread* self = art::Thread::Current();
  art::Thread* target;
  do {
    ThreadUtil::SuspendCheck(self);
    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
    // Make sure we won't be suspended in the middle of holding the thread_suspend_count_lock_ by
    // a user-code suspension. We retry and do another SuspendCheck to clear this.
    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
      continue;
    }
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
    // have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    target = ThreadUtil::GetNativeThread(thread, soa);
    if (target == nullptr) {
      return ERR(THREAD_NOT_ALIVE);
    } else if (target != self) {
      // TODO This is part of the spec but we could easily avoid needing to do it. We would just
      // put all the logic into a sync-checkpoint.
      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
      if (target->GetUserCodeSuspendCount() == 0) {
        return ERR(THREAD_NOT_SUSPENDED);
      }
    }
    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we
    // are done (unless it's 'self' in which case we don't care since we aren't going to be
    // returning).
    // TODO We could implement this using a synchronous checkpoint and not bother with any of the
    // suspension stuff. The spec does specifically say to return THREAD_NOT_SUSPENDED though.
    // Find the requested stack frame.
    std::unique_ptr<art::Context> context(art::Context::Create());
    FindFrameAtDepthVisitor visitor(target, context.get(), depth);
    visitor.WalkStack();
    if (!visitor.FoundFrame()) {
      return ERR(NO_MORE_FRAMES);
    }
    art::ArtMethod* method = visitor.GetMethod();
    if (method->IsNative()) {
      return ERR(OPAQUE_FRAME);
    }
    // From here we are sure to succeed.
    bool needs_instrument = false;
    // Get/create a shadow frame.
    art::ShadowFrame* shadow_frame = visitor.GetCurrentShadowFrame();
    if (shadow_frame == nullptr) {
      needs_instrument = true;
      const size_t frame_id = visitor.GetFrameId();
      const uint16_t num_regs = method->GetCodeItem()->registers_size_;
      shadow_frame = target->FindOrCreateDebuggerShadowFrame(frame_id,
                                                             num_regs,
                                                             method,
                                                             visitor.GetDexPc());
    }
    // Mark the shadow frame as needs_notify_pop_.
    shadow_frame->SetNotifyPop(true);
    tienv->notify_frames.insert(shadow_frame);
    // Make sure we will go to the interpreter and use the shadow frames.
    if (needs_instrument) {
      art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
    }
    return OK;
  } while (true);
}
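
// Hedged sketch of the agent flow NotifyFramePop expects: the target thread
// must be suspended (unless it is the caller), and the FramePop event must
// have been enabled beforehand (capability/event setup elided).
//
//   jvmti->SuspendThread(thread);
//   jvmti->NotifyFramePop(thread, 0);  // Watch the top-most Java frame.
//   jvmti->ResumeThread(thread);
//   // A JVMTI_EVENT_FRAME_POP callback fires when that frame is popped,
//   // either by returning or by an unhandled exception.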

}  // namespace openjdkjvmti