/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h.  The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <string.h>

#include <algorithm>
#include <list>
#include <memory>
#include <unordered_map>
#include <vector>

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "art_jvmti.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "dex_file_annotations.h"
#include "handle_scope-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

// Walks a thread's stack and reports each Java frame (runtime methods are skipped) to the
// given callback as a jvmtiFrameInfo. The first "start" Java frames are skipped; when "stop"
// is non-zero, at most "stop" frames are reported, otherwise all remaining frames are.
template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) = default;

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::DexFile::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

// Helper that deduces the callback type, so callers can pass a lambda directly.
template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // We should only get here after having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}
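
// Worked example (illustrative only, not part of the implementation): suppose the visitor
// collected frames = {f0, f1, f2, f3, f4}, with f0 being the top-most Java frame. For a
// negative start depth, say start_depth = -2 with max_frame_count = 5, the code above copies
// count = min(2, 5) = 2 entries starting at index 5 + (-2) = 3, so the caller receives
// {f3, f4}, i.e. the two bottom-most Java frames, matching the JVMTI semantics of negative
// start depths.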

// Fills a caller-provided jvmtiFrameInfo buffer directly, without an intermediate vector.
// Only used for non-negative start depths, where frames are written in walk order.
struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

static jvmtiError GetThread(JNIEnv* env,
                            art::ScopedObjectAccessAlreadyRunnable& soa,
                            jthread java_thread,
                            art::Thread** thread)
    REQUIRES_SHARED(art::Locks::mutator_lock_)  // Needed for FromManagedThread.
    REQUIRES(art::Locks::thread_list_lock_) {   // Needed for FromManagedThread.
  if (java_thread == nullptr) {
    *thread = art::Thread::Current();
    if (*thread == nullptr) {
      // GetStackTrace can only be run during the live phase, so the current thread should be
      // attached and thus available. Getting a null for current means we're starting up or
      // dying.
      return ERR(WRONG_PHASE);
    }
  } else {
    if (!env->IsInstanceOf(java_thread, art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }

    // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
    *thread = art::Thread::FromManagedThread(soa, java_thread);
    if (*thread == nullptr) {
      return ERR(THREAD_NOT_ALIVE);
    }
  }
  return ERR(NONE);
}
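
// Call sketch (illustrative only): callers below resolve either the current thread or an
// explicit peer through this helper, roughly
//
//   art::Thread* target = nullptr;
//   jvmtiError err = GetThread(jni_env, soa, /* java_thread= */ nullptr, &target);
//
// With java_thread == nullptr the result is art::Thread::Current(); with a jthread peer it
// goes through art::Thread::FromManagedThread(). The caller must already hold the mutator
// lock (shared) and thread_list_lock_, as the entry points below do. "jni_env" here stands
// for whatever JNIEnv* the caller has at hand.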

jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting ||
      state == art::ThreadState::kTerminated ||
      thread->IsStillStarting()) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    thread->RequestSynchronousCheckpoint(&closure);
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  }

  GetStackTraceVectorClosure closure(0, 0);
  thread->RequestSynchronousCheckpoint(&closure);

  return TranslateFrameVector(closure.frames,
                              start_depth,
                              closure.start_result,
                              max_frame_count,
                              frame_buffer,
                              count_ptr);
}
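
// Usage sketch (illustrative only, not part of this implementation): an agent reaches this
// code through the jvmtiEnv function table, for example:
//
//   jvmtiFrameInfo frames[16];
//   jint count = 0;
//   jvmtiError err = jvmti->GetStackTrace(thread, 0, 16, frames, &count);
//   if (err == JVMTI_ERROR_NONE && count > 0) {
//     // frames[0].method / frames[0].location describe the top-most Java frame.
//   }
//
// "jvmti" and "thread" are assumed to be a valid jvmtiEnv* and jthread obtained elsewhere by
// the agent.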

// Collects up to stop_input frames for each thread visited during a checkpoint. The Data type
// provides synchronized per-thread frame storage through GetFrameStorageFor(); returning null
// from there skips the thread.
template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_) : stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) OVERRIDE
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();

    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  const size_t stop_input;
  Data* data;
};

jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  GetAllStackTracesVectorClosure<AllStackTracesData> closure(
      static_cast<size_t>(max_frame_count), &data);
  art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);

  art::Thread* current = art::Thread::Current();

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}
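
// Usage sketch (illustrative only): an agent would typically call this as
//
//   jvmtiStackInfo* stack_info = nullptr;
//   jint thread_count = 0;
//   jvmtiError err = jvmti->GetAllStackTraces(16, &stack_info, &thread_count);
//   if (err == JVMTI_ERROR_NONE) {
//     // ... inspect stack_info[0 .. thread_count) ...
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(stack_info));
//   }
//
// Because the frame buffers are packed into the same allocation as the jvmtiStackInfo array
// (see above), a single Deallocate call releases everything. "jvmti" is assumed to be a valid
// jvmtiEnv* held by the agent.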

jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);      // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
              REQUIRES_SHARED(art::Locks::mutator_lock_)
              REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  GetAllStackTracesVectorClosure<SelectStackTracesData> closure(
      static_cast<size_t>(max_frame_count), &data);
  art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors so far. Now put it all into one output buffer. Note that its size is based on
  // thread_count, which may be larger than data.frames.size().
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
          JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}
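
// Usage sketch (illustrative only): given a set of jthread handles, an agent calls
//
//   jvmtiStackInfo* stack_info = nullptr;
//   jvmtiError err = jvmti->GetThreadListStackTraces(thread_count, thread_list,
//                                                    16, &stack_info);
//   if (err == JVMTI_ERROR_NONE) {
//     // stack_info[i] corresponds to thread_list[i], even for threads that are not yet
//     // started or already terminated (frame_count == 0 for those).
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(stack_info));
//   }
//
// "jvmti", "thread_count", and "thread_list" are assumed to be set up by the agent.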

// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
// runtime methods and transitions must not be counted.
struct GetFrameCountVisitor : public art::StackVisitor {
  explicit GetFrameCountVisitor(art::Thread* thread)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        count(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      count++;
    }
    return true;
  }

  size_t count;
};

struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetFrameCountVisitor visitor(self);
    visitor.WalkStack(false);

    count = visitor.count;
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);

  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  thread->RequestSynchronousCheckpoint(&closure);

  *count_ptr = closure.count;
  return ERR(NONE);
}
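
// Usage sketch (illustrative only):
//
//   jint depth = 0;
//   jvmtiError err = jvmti->GetFrameCount(thread, &depth);
//   // On success, "depth" holds the number of Java frames on "thread"'s stack.
//
// "jvmti" and "thread" are assumed to be provided by the agent.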

// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct GetLocationVisitor : public art::StackVisitor {
  GetLocationVisitor(art::Thread* thread, size_t n_in)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        n(n_in),
        count(0),
        caller(nullptr),
        caller_dex_pc(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      DCHECK(caller == nullptr);
      if (count == n) {
        caller = m;
        caller_dex_pc = GetDexPc(false);
        return false;
      }
      count++;
    }
    return true;
  }

  const size_t n;
  size_t count;
  art::ArtMethod* caller;
  uint32_t caller_dex_pc;
};

struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetLocationVisitor visitor(self, n);
    visitor.WalkStack(false);

    method = visitor.caller;
    dex_pc = visitor.caller_dex_pc;
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  thread->RequestSynchronousCheckpoint(&closure);

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::DexFile::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}
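
// Usage sketch (illustrative only):
//
//   jmethodID method;
//   jlocation location;
//   jvmtiError err = jvmti->GetFrameLocation(thread, 0, &method, &location);
//   // On success, "method"/"location" identify the executing method at depth 0 and its
//   // dex pc (-1 for native methods).
//
// "jvmti" and "thread" are assumed to be provided by the agent.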

}  // namespace openjdkjvmti