/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "arch/context.h"
#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "jni/jni_env_ext.h"
#include "jni/jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "ti_logging.h"
#include "ti_thread.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

// Walks a thread's stack and calls `fn` with a jvmtiFrameInfo for each Java
// frame (runtime methods are skipped), after skipping the first `start` Java
// frames; at most `stop` frames are reported (0 means no limit).
template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) noexcept = default;

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

art::ShadowFrame* FindFrameAtDepthVisitor::GetOrCreateShadowFrame(bool* created_frame) {
  art::ShadowFrame* cur = GetCurrentShadowFrame();
  if (cur == nullptr) {
    *created_frame = true;
    art::ArtMethod* method = GetMethod();
    const uint16_t num_regs = method->DexInstructionData().RegistersSize();
    cur = GetThread()->FindOrCreateDebuggerShadowFrame(GetFrameId(),
                                                       num_regs,
                                                       method,
                                                       GetDexPc());
    DCHECK(cur != nullptr);
  } else {
    *created_frame = false;
  }
  return cur;
}

template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // We only get here after something was collected.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}
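
// Worked example for the bottom-up path above (illustrative, not part of the
// original file): suppose the whole stack was collected into `frames` and the
// caller asked for frames from the bottom.
//
//   collected_frames == 5, start_depth == -2, max_frame_count == 10
//   count == std::min(size_t(2), size_t(10)) == 2
//   the copy starts at &frames.data()[5 + (-2)], i.e. index 3
//   => frame_buffer receives the two bottom-most frames and *count_ptr == 2.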

struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  } else {
    GetStackTraceVectorClosure closure(0, 0);
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }

    return TranslateFrameVector(closure.frames,
                                start_depth,
                                closure.start_result,
                                max_frame_count,
                                frame_buffer,
                                count_ptr);
  }
}
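
// Agent-side usage sketch (illustrative; assumes a valid jvmtiEnv* `jvmti` and
// a jthread `thread` obtained elsewhere, e.g. from a ThreadStart callback):
//
//   jvmtiFrameInfo frames[8];
//   jint count = 0;
//   jvmtiError err = jvmti->GetStackTrace(thread, /*start_depth=*/0,
//                                         /*max_frame_count=*/8, frames, &count);
//   if (err == JVMTI_ERROR_NONE && count >= 1) {
//     // frames[0].method is the jmethodID of the top-most Java frame;
//     // frames[0].location is its dex pc, or -1 when no dex pc is available.
//   }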

template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) override
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  // Note: requires the mutator lock as the checkpoint requires the mutator lock.
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}

jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to the threads' Java peer objects.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  art::Thread* current = art::Thread::Current();
  {
    art::ScopedObjectAccess soa(current);
    RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
  }

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}
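
// Agent-side usage sketch (illustrative; `jvmti` is assumed to be a valid
// jvmtiEnv*). Because the result is one allocation, a single Deallocate call
// frees both the jvmtiStackInfo array and the frame buffers it points into:
//
//   jvmtiStackInfo* infos = nullptr;
//   jint thread_count = 0;
//   if (jvmti->GetAllStackTraces(/*max_frame_count=*/16, &infos, &thread_count)
//           == JVMTI_ERROR_NONE) {
//     for (jint t = 0; t < thread_count; t++) {
//       // infos[t].thread, infos[t].frame_count, infos[t].frame_buffer ...
//     }
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(infos));
//   }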

jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);  // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer. Note that the top-level array is
  // sized by thread_count, which may differ from data.frames.size().
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
                                        JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}
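
// Agent-side usage sketch (illustrative; `jvmti` and the jthread references are
// assumed to be valid). Unlike GetAllStackTraces, the result array matches the
// input order, and threads without a native peer are reported with
// frame_count == 0 and a NEW or TERMINATED state:
//
//   jthread threads[2] = { t0, t1 };  // hypothetical jthread references
//   jvmtiStackInfo* infos = nullptr;
//   if (jvmti->GetThreadListStackTraces(2, threads, /*max_frame_count=*/16, &infos)
//           == JVMTI_ERROR_NONE) {
//     // infos[0] corresponds to t0, infos[1] to t1.
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(infos));
//   }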

struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    // This is not StackVisitor::ComputeNumFrames, as runtime methods and transitions must not be
    // counted.
    art::StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
          art::ArtMethod* m = stack_visitor->GetMethod();
          if (m != nullptr && !m->IsRuntimeMethod()) {
            count++;
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }

  DCHECK(thread != nullptr);
  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  *count_ptr = closure.count;
  return ERR(NONE);
}
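
// Agent-side usage sketch (illustrative; assumes a valid `jvmti` and `thread`):
//
//   jint depth = 0;
//   if (jvmti->GetFrameCount(thread, &depth) == JVMTI_ERROR_NONE) {
//     // `depth` counts Java frames only; runtime methods are excluded, and
//     // inlined frames are counted individually (see the closure above).
//   }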

struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    // Walks up the stack 'n' callers.
    size_t count = 0u;
    art::StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
          art::ArtMethod* m = stack_visitor->GetMethod();
          if (m != nullptr && !m->IsRuntimeMethod()) {
            DCHECK(method == nullptr);
            if (count == n) {
              method = m;
              dex_pc = stack_visitor->GetDexPc(/*abort_on_failure=*/false);
              return false;
            }
            count++;
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (depth < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::dex::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}
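
// Agent-side usage sketch (illustrative; assumes a valid `jvmti` and `thread`):
//
//   jmethodID method = nullptr;
//   jlocation location = 0;
//   jvmtiError err = jvmti->GetFrameLocation(thread, /*depth=*/0, &method, &location);
//   if (err == JVMTI_ERROR_NONE) {
//     // `location` is the dex pc of the top frame, or -1 for native/proxy methods.
//   } else if (err == JVMTI_ERROR_NO_MORE_FRAMES) {
//     // `depth` was past the end of the stack.
//   }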

struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
  // We need a context because VisitLocks needs it to retrieve the monitor objects.
  explicit MonitorVisitor(art::Thread* thread)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      : art::StackVisitor(thread,
                          art::Context::Create(),
                          art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        hs(art::Thread::Current()),
        current_stack_depth(0) {}

  ~MonitorVisitor() {
    delete context_;
  }

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    if (!GetMethod()->IsRuntimeMethod()) {
      art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
      ++current_stack_depth;
    }
    return true;
  }

  static void AppendOwnedMonitors(art::mirror::Object* owned_monitor, void* arg)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
    art::ObjPtr<art::mirror::Object> mon(owned_monitor);
    // Filter out duplicates.
    for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
      if (monitor.Get() == mon.Ptr()) {
        return;
      }
    }
    visitor->monitors.push_back(visitor->hs.NewHandle(mon));
    visitor->stack_depths.push_back(visitor->current_stack_depth);
  }

  void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (const art::Handle<art::mirror::Object>& m : monitors) {
      if (m.Get() == obj) {
        return;
      }
    }
    monitors.push_back(hs.NewHandle(obj));
    stack_depths.push_back(-1);
  }

  art::VariableSizedHandleScope hs;
  jint current_stack_depth;
  std::vector<art::Handle<art::mirror::Object>> monitors;
  std::vector<jint> stack_depths;
};

template<typename Fn>
struct MonitorInfoClosure : public art::Closure {
 public:
  explicit MonitorInfoClosure(Fn handle_results)
      : err_(OK), handle_results_(handle_results) {}

  void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    // Find the monitors on the stack.
    MonitorVisitor visitor(target);
    visitor.WalkStack(/* include_transitions= */ false);
    // Find any other monitors, including ones acquired in native code.
    art::RootInfo root_info(art::kRootVMInternal);
    target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
    err_ = handle_results_(visitor);
  }

  jvmtiError GetError() {
    return err_;
  }

 private:
  jvmtiError err_;
  Fn handle_results_;
};


template <typename Fn>
static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
                                            jthread thread,
                                            Fn handle_results)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  art::Thread* self = art::Thread::Current();
  MonitorInfoClosure<Fn> closure(handle_results);
  bool called_method = false;
  {
    art::Locks::thread_list_lock_->ExclusiveLock(self);
    art::Thread* target = nullptr;
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
      return err;
    }
    if (target != self) {
      called_method = true;
      // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
      // Since this deals with object references we need to avoid going to sleep.
      art::ScopedAssertNoThreadSuspension sants("Getting owned monitor usage");
      if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
        return ERR(THREAD_NOT_ALIVE);
      }
    } else {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    }
  }
  // We cannot run the closure on the current thread while holding the thread_list_lock, since we
  // need to call into the verifier, which can cause the current thread to suspend for GC.
  // Suspending would be a bad thing to do while holding the ThreadListLock. For other threads we
  // are fine, since the closure runs on a checkpoint; but if the thread is the current one we need
  // to drop the mutex first.
  if (!called_method) {
    closure.Run(self);
  }
  return closure.GetError();
}
| 943 | |
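// jvmti entrypoint for GetOwnedMonitorStackDepthInfo: reports each monitor owned by the given
// thread along with the stack depth at which it was acquired. A sketch of agent-side use
// (illustrative only; capability setup and error handling omitted):
//
//   jint count;
//   jvmtiMonitorStackDepthInfo* infos;
//   jvmti->GetOwnedMonitorStackDepthInfo(thread, &count, &infos);
//   // ... inspect infos[i].monitor and infos[i].stack_depth ...
//   jvmti->Deallocate(reinterpret_cast<unsigned char*>(infos));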
jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
                                                    jthread thread,
                                                    jint* info_cnt,
                                                    jvmtiMonitorStackDepthInfo** info_ptr) {
  if (info_cnt == nullptr || info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  std::vector<uint32_t> depths;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
      depths.push_back(visitor.stack_depths[i]);
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
  if (err != OK) {
    return err;
  }
  *info_cnt = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*info_ptr)[i] = {
      soa.AddLocalReference<jobject>(mons[i].Read()),
      static_cast<jint>(depths[i])
    };
  }
  return err;
}

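// jvmti entrypoint for GetOwnedMonitorInfo: like GetOwnedMonitorStackDepthInfo above, but reports
// only the owned monitors, not the depths they were acquired at. The out-array comes from
// env->Allocate, so the agent is responsible for Deallocate-ing it.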
jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                          jthread thread,
                                          jint* owned_monitor_count_ptr,
                                          jobject** owned_monitors_ptr) {
  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jobject) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
  if (err != OK) {
    return err;
  }
  *owned_monitor_count_ptr = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
  }
  return err;
}

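// jvmti entrypoint for NotifyFramePop. Tags the (non-native) frame at 'depth' on 'thread' so a
// FramePop event fires when that frame is popped. The target must be the calling thread or a
// thread currently suspended by user code; holding the user_code_suspension_lock_ keeps it
// suspended while we walk to the frame and mark its shadow frame. A minimal agent-side sketch
// (illustrative; assumes the can_generate_frame_pop_events capability and an enabled FramePop
// callback):
//
//   jvmti->NotifyFramePop(thread, 0);  // Report when the thread's current frame returns.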
jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
  art::Thread* self = art::Thread::Current();
  art::Thread* target;
  do {
    ThreadUtil::SuspendCheck(self);
    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
    // Make sure a user-code suspension cannot suspend us while we hold the
    // thread_suspend_count_lock_. If one is pending we retry, doing another SuspendCheck to clear
    // it.
    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
      continue;
    }
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
    // have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      return err;
    }
    if (target != self) {
      // TODO This is part of the spec but we could easily avoid needing to do it. We would just
      // put all the logic into a sync-checkpoint.
      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
      if (target->GetUserCodeSuspendCount() == 0) {
        return ERR(THREAD_NOT_SUSPENDED);
      }
    }
    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we
    // are done (unless it's 'self' in which case we don't care since we aren't going to be
    // returning).
    // TODO We could implement this using a synchronous checkpoint and not bother with any of the
    // suspension stuff. The spec does specifically say to return THREAD_NOT_SUSPENDED though.
    // Find the requested stack frame.
    std::unique_ptr<art::Context> context(art::Context::Create());
    FindFrameAtDepthVisitor visitor(target, context.get(), depth);
    visitor.WalkStack();
    if (!visitor.FoundFrame()) {
      return ERR(NO_MORE_FRAMES);
    }
    art::ArtMethod* method = visitor.GetMethod();
    if (method->IsNative()) {
      return ERR(OPAQUE_FRAME);
    }
    // From here we are sure to succeed.
    bool needs_instrument = false;
    // Get/create a shadow frame
    art::ShadowFrame* shadow_frame = visitor.GetOrCreateShadowFrame(&needs_instrument);
    {
      art::WriterMutexLock lk(self, tienv->event_info_mutex_);
      // Mark shadow frame as needs_notify_pop_
      shadow_frame->SetNotifyPop(true);
      tienv->notify_frames.insert(shadow_frame);
    }
    // Make sure we will go to the interpreter and use the shadow frames.
    if (needs_instrument) {
      art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
    }
    return OK;
  } while (true);
}

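// jvmti entrypoint for PopFrame. Discards the top java frame of a user-code-suspended thread
// (never the calling thread itself) by making the callee's shadow frame return immediately
// without running exit events while the caller re-tries its invoke instruction. Both frames must
// be non-native, hence the OPAQUE_FRAME checks below.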
jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
  art::Thread* self = art::Thread::Current();
  art::Thread* target;
  do {
    ThreadUtil::SuspendCheck(self);
    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
    // Make sure a user-code suspension cannot suspend us while we hold the
    // thread_suspend_count_lock_. If one is pending we retry, doing another SuspendCheck to clear
    // it.
    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
      continue;
    }
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
    // have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      return err;
    }
    {
      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
      if (target == self || target->GetUserCodeSuspendCount() == 0) {
        // We cannot be the current thread for this function.
        return ERR(THREAD_NOT_SUSPENDED);
      }
    }
    JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target);
    constexpr art::StackVisitor::StackWalkKind kWalkKind =
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
    if (tls_data != nullptr &&
        tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
        tls_data->disable_pop_frame_depth ==
            art::StackVisitor::ComputeNumFrames(target, kWalkKind)) {
      JVMTI_LOG(WARNING, env) << "Disallowing frame pop due to in-progress class-load/prepare. "
                              << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
                              << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
                              << "more information.";
      return ERR(OPAQUE_FRAME);
    }
    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we
    // are done.
    std::unique_ptr<art::Context> context(art::Context::Create());
    FindFrameAtDepthVisitor final_frame(target, context.get(), 0);
    FindFrameAtDepthVisitor penultimate_frame(target, context.get(), 1);
    final_frame.WalkStack();
    penultimate_frame.WalkStack();

    if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
      // Cannot do it if there is only one frame!
      return ERR(NO_MORE_FRAMES);
    }

    art::ArtMethod* called_method = final_frame.GetMethod();
    art::ArtMethod* calling_method = penultimate_frame.GetMethod();
    if (calling_method->IsNative() || called_method->IsNative()) {
      return ERR(OPAQUE_FRAME);
    }
    // From here we are sure to succeed.

    // Get/create a shadow frame
    bool created_final_frame = false;
    bool created_penultimate_frame = false;
    art::ShadowFrame* called_shadow_frame =
        final_frame.GetOrCreateShadowFrame(&created_final_frame);
    art::ShadowFrame* calling_shadow_frame =
        penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame);

    CHECK_NE(called_shadow_frame, calling_shadow_frame)
        << "Frames at different depths not different!";

    // Tell the shadow-frame to return immediately and skip all exit events.
    called_shadow_frame->SetForcePopFrame(true);
    calling_shadow_frame->SetForceRetryInstruction(true);
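    // Once 'target' resumes, the callee frame unwinds without executing further and the caller
    // re-runs the invoke that created it, which is how the spec's "reset to just before the
    // invocation" behavior is realized here.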

    // Make sure we will go to the interpreter and use the shadow frames. The early return for
    // the final frame will force everything into the interpreter, so we only need to instrument
    // if it was not already present.
    if (created_final_frame) {
      DeoptManager::Get()->DeoptimizeThread(target);
    }
    return OK;
  } while (true);
}

} // namespace openjdkjvmti