/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "jni/jni_env_ext.h"
#include "jni/jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "ti_thread.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

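// Visitor that collects one jvmtiFrameInfo (method id + dex pc) per Java frame, skipping
// runtime methods. The first 'start' Java frames are dropped; after that, at most 'stop'
// frames are reported (stop == 0 means no limit). Each frame is handed to the FrameFn callback.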
template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) = default;

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

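// Factory helper so the FrameFn template argument (usually a lambda type) is deduced for the
// caller.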
template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

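// Closure run on the target thread (via RequestSynchronousCheckpoint) that gathers frames into
// a vector. start_result/stop_result expose how much of the skip count and frame limit was
// left unconsumed, which the caller uses for validation.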
struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

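// Copies the collected frames into the caller's buffer, implementing the JVMTI GetStackTrace
// depth convention: a non-negative start_depth counts frames from the top of the stack, while
// a negative start_depth selects the last -start_depth frames, counted from the bottom.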
static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

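// Variant of the closure above that writes each frame straight into the caller-provided
// buffer, avoiding the intermediate vector. Only valid for non-negative start depths, where
// the output is a simple prefix of the (possibly truncated) walk.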
struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

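// For illustration, a minimal sketch of how an agent reaches this entry point through the
// standard jvmti.h interface (the buffer size 16 is an arbitrary choice here):
//
//   jvmtiFrameInfo frames[16];
//   jint count = 0;
//   jvmtiError err = jvmti->GetStackTrace(thread, /* start_depth */ 0,
//                                         /* max_frame_count */ 16, frames, &count);
//   if (err == JVMTI_ERROR_NONE && count >= 1) {
//     // frames[0].method and frames[0].location identify the top Java frame.
//   }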
jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  } else {
    GetStackTraceVectorClosure closure(0, 0);
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }

    return TranslateFrameVector(closure.frames,
                                start_depth,
                                closure.start_result,
                                max_frame_count,
                                frame_buffer,
                                count_ptr);
  }
}

template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) override
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

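// Runs a GetAllStackTracesVectorClosure on all threads via RunCheckpoint and blocks on its
// barrier until every thread has executed the closure.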
template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  // Note: requires the mutator lock as the checkpoint requires the mutator lock.
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}

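// For illustration, a minimal agent-side sketch (standard jvmti.h interface). The returned
// array and all frame buffers live in one allocation, so a single Deallocate releases
// everything:
//
//   jvmtiStackInfo* stack_info = nullptr;
//   jint thread_count = 0;
//   if (jvmti->GetAllStackTraces(/* max_frame_count */ 32, &stack_info, &thread_count) ==
//           JVMTI_ERROR_NONE) {
//     // ... inspect stack_info[0 .. thread_count) ...
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(stack_info));
//   }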
jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  art::Thread* current = art::Thread::Current();
  {
    art::ScopedObjectAccess soa(current);
    RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
  }

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}

jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);  // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer. Note that this is sized for the full
  // thread_count, which may be larger than frames.size() if some threads were not found.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
                                        JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
// runtime methods and transitions must not be counted.
struct GetFrameCountVisitor : public art::StackVisitor {
  explicit GetFrameCountVisitor(art::Thread* thread)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        count(0) {}

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      count++;
    }
    return true;
  }

  size_t count;
};

struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetFrameCountVisitor visitor(self);
    visitor.WalkStack(false);

    count = visitor.count;
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }

  DCHECK(thread != nullptr);
  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  *count_ptr = closure.count;
  return ERR(NONE);
}

// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct GetLocationVisitor : public art::StackVisitor {
  GetLocationVisitor(art::Thread* thread, size_t n_in)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        n(n_in),
        count(0),
        caller(nullptr),
        caller_dex_pc(0) {}

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      DCHECK(caller == nullptr);
      if (count == n) {
        caller = m;
        caller_dex_pc = GetDexPc(false);
        return false;
      }
      count++;
    }
    return true;
  }

  const size_t n;
  size_t count;
  art::ArtMethod* caller;
  uint32_t caller_dex_pc;
};

struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetLocationVisitor visitor(self, n);
    visitor.WalkStack(false);

    method = visitor.caller;
    dex_pc = visitor.caller_dex_pc;
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

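// Returns the method and bytecode index executing at the given stack depth. Per the JVMTI spec
// the location is -1 for native methods; proxy methods are reported the same way here.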
jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (depth < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::dex::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}

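// Collects the monitors owned by a thread: monitors locked by stack frames are found via
// Monitor::VisitLocks, recording the depth of the owning frame; monitors acquired through JNI
// are visited as monitor roots and recorded with depth -1.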
struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
  // We need a context because VisitLocks needs it to retrieve the monitor objects.
  explicit MonitorVisitor(art::Thread* thread)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      : art::StackVisitor(thread,
                          art::Context::Create(),
                          art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        hs(art::Thread::Current()),
        current_stack_depth(0) {}

  ~MonitorVisitor() {
    delete context_;
  }

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    if (!GetMethod()->IsRuntimeMethod()) {
      art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
      ++current_stack_depth;
    }
    return true;
  }

  static void AppendOwnedMonitors(art::mirror::Object* owned_monitor, void* arg)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
    art::ObjPtr<art::mirror::Object> mon(owned_monitor);
    // Filter out duplicates.
    for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
      if (monitor.Get() == mon.Ptr()) {
        return;
      }
    }
    visitor->monitors.push_back(visitor->hs.NewHandle(mon));
    visitor->stack_depths.push_back(visitor->current_stack_depth);
  }

  void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (const art::Handle<art::mirror::Object>& m : monitors) {
      if (m.Get() == obj) {
        return;
      }
    }
    monitors.push_back(hs.NewHandle(obj));
    stack_depths.push_back(-1);
  }

  art::VariableSizedHandleScope hs;
  jint current_stack_depth;
  std::vector<art::Handle<art::mirror::Object>> monitors;
  std::vector<jint> stack_depths;
};

template <typename Fn>
struct MonitorInfoClosure : public art::Closure {
 public:
  explicit MonitorInfoClosure(Fn handle_results)
      : err_(OK), handle_results_(handle_results) {}

  void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    // Find the monitors on the stack.
    MonitorVisitor visitor(target);
    visitor.WalkStack(/* include_transitions */ false);
    // Find any other monitors, including ones acquired in native code.
    art::RootInfo root_info(art::kRootVMInternal);
    target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
    err_ = handle_results_(visitor);
  }

  jvmtiError GetError() {
    return err_;
  }

 private:
  jvmtiError err_;
  Fn handle_results_;
};

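// Shared implementation for GetOwnedMonitorInfo and GetOwnedMonitorStackDepthInfo: runs a
// MonitorInfoClosure against the target thread, via a synchronous checkpoint for remote
// threads or a direct call for the current thread (see the locking caveat below).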
template <typename Fn>
static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
                                            jthread thread,
                                            Fn handle_results)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  art::Thread* self = art::Thread::Current();
  MonitorInfoClosure<Fn> closure(handle_results);
  bool called_method = false;
  {
    art::Locks::thread_list_lock_->ExclusiveLock(self);
    art::Thread* target = nullptr;
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
      return err;
    }
    if (target != self) {
      called_method = true;
      // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
      // Since this deals with object references we need to avoid going to sleep.
      art::ScopedAssertNoThreadSuspension sants("Getting owned monitor usage");
      if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
        return ERR(THREAD_NOT_ALIVE);
      }
    } else {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    }
  }
  // Cannot call the closure on the current thread if we have thread_list_lock since we need to call
  // into the verifier which can cause the current thread to suspend for gc. Suspending would be a
  // bad thing to do if we hold the ThreadListLock. For other threads since we are running it on a
  // checkpoint we are fine but if the thread is the current one we need to drop the mutex first.
  if (!called_method) {
    closure.Run(self);
  }
  return closure.GetError();
}

jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
                                                    jthread thread,
                                                    jint* info_cnt,
                                                    jvmtiMonitorStackDepthInfo** info_ptr) {
  if (info_cnt == nullptr || info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  std::vector<uint32_t> depths;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
      depths.push_back(visitor.stack_depths[i]);
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
  if (err != OK) {
    return err;
  }
  *info_cnt = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*info_ptr)[i] = {
      soa.AddLocalReference<jobject>(mons[i].Read()),
      static_cast<jint>(depths[i])
    };
  }
  return err;
}

jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                          jthread thread,
                                          jint* owned_monitor_count_ptr,
                                          jobject** owned_monitors_ptr) {
  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jobject) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
  if (err != OK) {
    return err;
  }
  *owned_monitor_count_ptr = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
  }
  return err;
}

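// Requests a FramePop event for the frame at 'depth'. The frame is pinned to interpreted
// execution by attaching a shadow frame whose NotifyPop flag fires the event when the method
// returns; the stack is instrumented if the frame was not already running in the interpreter.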
jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
  art::Thread* self = art::Thread::Current();
  art::Thread* target;
  do {
    ThreadUtil::SuspendCheck(self);
    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
    // Make sure we won't be suspended in the middle of holding the thread_suspend_count_lock_ by a
    // user-code suspension. We retry and do another SuspendCheck to clear this.
    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
      continue;
    }
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
    // have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      return err;
    }
    if (target != self) {
      // TODO This is part of the spec but we could easily avoid needing to do it. We would just put
      // all the logic into a sync-checkpoint.
      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
      if (target->GetUserCodeSuspendCount() == 0) {
        return ERR(THREAD_NOT_SUSPENDED);
      }
    }
    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
    // done (unless it's 'self' in which case we don't care since we aren't going to be returning).
    // TODO We could implement this using a synchronous checkpoint and not bother with any of the
    // suspension stuff. The spec does specifically say to return THREAD_NOT_SUSPENDED though.
    // Find the requested stack frame.
    std::unique_ptr<art::Context> context(art::Context::Create());
    FindFrameAtDepthVisitor visitor(target, context.get(), depth);
    visitor.WalkStack();
    if (!visitor.FoundFrame()) {
      return ERR(NO_MORE_FRAMES);
    }
    art::ArtMethod* method = visitor.GetMethod();
    if (method->IsNative()) {
      return ERR(OPAQUE_FRAME);
    }
    // From here we are sure to succeed.
    bool needs_instrument = false;
    // Get/create a shadow frame.
    art::ShadowFrame* shadow_frame = visitor.GetCurrentShadowFrame();
    if (shadow_frame == nullptr) {
      needs_instrument = true;
      const size_t frame_id = visitor.GetFrameId();
      const uint16_t num_regs = method->DexInstructionData().RegistersSize();
      shadow_frame = target->FindOrCreateDebuggerShadowFrame(frame_id,
                                                             num_regs,
                                                             method,
                                                             visitor.GetDexPc());
    }
    {
      art::WriterMutexLock lk(self, tienv->event_info_mutex_);
      // Mark shadow frame as needs_notify_pop_.
      shadow_frame->SetNotifyPop(true);
      tienv->notify_frames.insert(shadow_frame);
    }
    // Make sure we will go to the interpreter and use the shadow frames.
    if (needs_instrument) {
      art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
    }
    return OK;
  } while (true);
}

}  // namespace openjdkjvmti