/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h.  The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "dex_file_annotations.h"
#include "dex_file_types.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "ti_thread.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) = default;

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

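// Illustrative usage sketch (not part of the runtime): the visitor above is
// driven with a callback that receives one jvmtiFrameInfo per visited Java
// frame. A stop value of 0 means "no limit". 'thread' is assumed to be the
// current thread or one stopped at a checkpoint.
//
//   std::vector<jvmtiFrameInfo> frames;
//   auto collect = [&](jvmtiFrameInfo info) { frames.push_back(info); };
//   auto visitor = MakeStackTraceVisitor(thread, /* start */ 0, /* stop */ 0, collect);
//   visitor.WalkStack(/* include_transitions */ false);
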
struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

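// Worked example for the bottom-up path above: with 5 collected frames,
// start_depth == -2 and max_frame_count >= 2, count == 2 and the copy starts
// at frames[5 + (-2)] == frames[3], so the two bottommost frames frames[3]
// and frames[4] are returned and *count_ptr is set to 2.
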
struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  } else {
    GetStackTraceVectorClosure closure(0, 0);
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }

    return TranslateFrameVector(closure.frames,
                                start_depth,
                                closure.start_result,
                                max_frame_count,
                                frame_buffer,
                                count_ptr);
  }
}

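// Example agent-side call (illustrative only; 'jvmti' and 'thread' would come
// from the agent environment, e.g. Agent_OnLoad and an event callback):
//
//   jvmtiFrameInfo frames[16];
//   jint count = 0;
//   jvmtiError err = jvmti->GetStackTrace(thread,
//                                         /* start_depth */ 0,
//                                         /* max_frame_count */ 16,
//                                         frames,
//                                         &count);
//   if (err == JVMTI_ERROR_NONE && count >= 1) {
//     // frames[0].method is the jmethodID of the top Java frame and
//     // frames[0].location its dex pc (-1 for native methods).
//   }
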
template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) OVERRIDE
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count) {
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}

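// The Data parameter of RunCheckpointAndWait is a duck-typed contract rather
// than a named interface. A minimal sketch of what an implementation must
// provide (names here are illustrative only):
//
//   struct ExampleData {
//     // Called once per checkpointed thread; returns the vector the closure
//     // should fill with that thread's frames, or nullptr to skip it.
//     std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self,
//                                                     art::Thread* thread)
//         REQUIRES(!mutex);
//     art::Mutex mutex;  // Guards the storage against concurrent checkpoints.
//   };
//
// AllStackTracesData and SelectStackTracesData below are the two real
// implementations.
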
jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  art::Thread* current = art::Thread::Current();

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}

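// Example agent-side call (illustrative only): the returned stack_info is a
// single allocation, so one Deallocate call releases everything, as the JVMTI
// spec requires.
//
//   jvmtiStackInfo* stack_info = nullptr;
//   jint thread_count = 0;
//   jvmtiError err = jvmti->GetAllStackTraces(/* max_frame_count */ 32,
//                                             &stack_info,
//                                             &thread_count);
//   if (err == JVMTI_ERROR_NONE) {
//     for (jint t = 0; t < thread_count; ++t) {
//       // Use stack_info[t].thread, .state, .frame_count, .frame_buffer.
//     }
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(stack_info));
//   }
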
jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);      // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer. Note that the buffer is sized by
  // thread_count, which may be larger than data.frames.size().
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
          JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
// runtime methods and transitions must not be counted.
struct GetFrameCountVisitor : public art::StackVisitor {
  explicit GetFrameCountVisitor(art::Thread* thread)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        count(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      count++;
    }
    return true;
  }

  size_t count;
};

struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetFrameCountVisitor visitor(self);
    visitor.WalkStack(false);

    count = visitor.count;
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }

  DCHECK(thread != nullptr);
  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  *count_ptr = closure.count;
  return ERR(NONE);
}

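// Example agent-side call (illustrative only):
//
//   jint count = 0;
//   if (jvmti->GetFrameCount(thread, &count) == JVMTI_ERROR_NONE) {
//     // 'count' excludes runtime methods and transitions, matching
//     // GetFrameCountVisitor above.
//   }
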
// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct GetLocationVisitor : public art::StackVisitor {
  GetLocationVisitor(art::Thread* thread, size_t n_in)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        n(n_in),
        count(0),
        caller(nullptr),
        caller_dex_pc(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      DCHECK(caller == nullptr);
      if (count == n) {
        caller = m;
        caller_dex_pc = GetDexPc(false);
        return false;
      }
      count++;
    }
    return true;
  }

  const size_t n;
  size_t count;
  art::ArtMethod* caller;
  uint32_t caller_dex_pc;
};

struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetLocationVisitor visitor(self, n);
    visitor.WalkStack(false);

    method = visitor.caller;
    dex_pc = visitor.caller_dex_pc;
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (depth < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::dex::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}

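// Example agent-side call (illustrative only): querying the frame one below
// the top of the stack (depth 1).
//
//   jmethodID method = nullptr;
//   jlocation location = 0;
//   jvmtiError err = jvmti->GetFrameLocation(thread, /* depth */ 1,
//                                            &method, &location);
//   if (err == JVMTI_ERROR_NONE) {
//     // 'location' is the dex pc as a jlocation, or -1 for native and
//     // proxy methods (see above).
//   }
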
struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
  // We need a context because VisitLocks needs it to retrieve the monitor objects.
  explicit MonitorVisitor(art::Thread* thread)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      : art::StackVisitor(thread,
                          art::Context::Create(),
                          art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        hs(art::Thread::Current()),
        current_stack_depth(0) {}

  ~MonitorVisitor() {
    delete context_;
  }

  bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    if (!GetMethod()->IsRuntimeMethod()) {
      art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
      ++current_stack_depth;
    }
    return true;
  }

  static void AppendOwnedMonitors(art::mirror::Object* owned_monitor, void* arg)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
    art::ObjPtr<art::mirror::Object> mon(owned_monitor);
    // Filter out duplicates.
    for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
      if (monitor.Get() == mon.Ptr()) {
        return;
      }
    }
    visitor->monitors.push_back(visitor->hs.NewHandle(mon));
    visitor->stack_depths.push_back(visitor->current_stack_depth);
  }

  void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (const art::Handle<art::mirror::Object>& m : monitors) {
      if (m.Get() == obj) {
        return;
      }
    }
    monitors.push_back(hs.NewHandle(obj));
    stack_depths.push_back(-1);
  }

  art::VariableSizedHandleScope hs;
  jint current_stack_depth;
  std::vector<art::Handle<art::mirror::Object>> monitors;
  std::vector<jint> stack_depths;
};

template<typename Fn>
struct MonitorInfoClosure : public art::Closure {
 public:
  MonitorInfoClosure(art::ScopedObjectAccess& soa, Fn handle_results)
      : soa_(soa), err_(OK), handle_results_(handle_results) {}

  void Run(art::Thread* target) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    // Find the monitors on the stack.
    MonitorVisitor visitor(target);
    visitor.WalkStack(/* include_transitions */ false);
    // Find any other monitors, including ones acquired in native code.
    art::RootInfo root_info(art::kRootVMInternal);
    target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
    err_ = handle_results_(soa_, visitor);
  }

  jvmtiError GetError() {
    return err_;
  }

 private:
  art::ScopedObjectAccess& soa_;
  jvmtiError err_;
  Fn handle_results_;
};


template <typename Fn>
static jvmtiError GetOwnedMonitorInfoCommon(jthread thread, Fn handle_results) {
  art::Thread* self = art::Thread::Current();
  art::ScopedObjectAccess soa(self);
  MonitorInfoClosure<Fn> closure(soa, handle_results);
  bool called_method = false;
  {
    art::Locks::thread_list_lock_->ExclusiveLock(self);
    art::Thread* target = nullptr;
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
      return err;
    }
    if (target != self) {
      called_method = true;
      // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
      if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &closure)) {
        return ERR(THREAD_NOT_ALIVE);
      }
    } else {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    }
  }
  // We cannot run the closure on the current thread while holding the thread_list_lock, since the
  // closure may call into the verifier, which can cause the current thread to suspend for GC.
  // Suspending would be a bad thing to do while holding the ThreadListLock. Other threads are fine
  // since we run the closure on them at a checkpoint, but for the current thread we must drop the
  // mutex first.
  if (!called_method) {
    closure.Run(self);
  }
  return closure.GetError();
}

jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
                                                    jthread thread,
                                                    jint* info_cnt,
                                                    jvmtiMonitorStackDepthInfo** info_ptr) {
  if (info_cnt == nullptr || info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * visitor.monitors.size();
    jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
    if (err != OK) {
      return err;
    }
    *info_cnt = visitor.monitors.size();
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      (*info_ptr)[i] = {
          soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get()),
          visitor.stack_depths[i]
      };
    }
    return OK;
  };
  return GetOwnedMonitorInfoCommon(thread, handle_fun);
}

jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                          jthread thread,
                                          jint* owned_monitor_count_ptr,
                                          jobject** owned_monitors_ptr) {
  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto nbytes = sizeof(jobject) * visitor.monitors.size();
    jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
    if (err != OK) {
      return err;
    }
    *owned_monitor_count_ptr = visitor.monitors.size();
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      (*owned_monitors_ptr)[i] =
          soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get());
    }
    return OK;
  };
  return GetOwnedMonitorInfoCommon(thread, handle_fun);
}
jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
  art::Thread* self = art::Thread::Current();
  art::Thread* target;
  do {
    ThreadUtil::SuspendCheck(self);
    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
    // Make sure we won't be suspended in the middle of holding the thread_suspend_count_lock_ by a
    // user-code suspension. We retry and do another SuspendCheck to clear this.
    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
      continue;
    }
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
    // have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      return err;
    }
    if (target != self) {
      // TODO This is part of the spec but we could easily avoid needing to do it. We would just put
      // all the logic into a sync-checkpoint.
      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
      if (target->GetUserCodeSuspendCount() == 0) {
        return ERR(THREAD_NOT_SUSPENDED);
      }
    }
    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
    // done (unless it's 'self' in which case we don't care since we aren't going to be returning).
    // TODO We could implement this using a synchronous checkpoint and not bother with any of the
    // suspension stuff. The spec does specifically say to return THREAD_NOT_SUSPENDED though.
    // Find the requested stack frame.
    std::unique_ptr<art::Context> context(art::Context::Create());
    FindFrameAtDepthVisitor visitor(target, context.get(), depth);
    visitor.WalkStack();
    if (!visitor.FoundFrame()) {
      return ERR(NO_MORE_FRAMES);
    }
    art::ArtMethod* method = visitor.GetMethod();
    if (method->IsNative()) {
      return ERR(OPAQUE_FRAME);
    }
    // From here we are sure to succeed.
    bool needs_instrument = false;
    // Get/create a shadow frame.
    art::ShadowFrame* shadow_frame = visitor.GetCurrentShadowFrame();
    if (shadow_frame == nullptr) {
      needs_instrument = true;
      const size_t frame_id = visitor.GetFrameId();
      const uint16_t num_regs = method->GetCodeItem()->registers_size_;
      shadow_frame = target->FindOrCreateDebuggerShadowFrame(frame_id,
                                                             num_regs,
                                                             method,
                                                             visitor.GetDexPc());
    }
    {
      art::WriterMutexLock lk(self, tienv->event_info_mutex_);
      // Mark shadow frame as needs_notify_pop_.
      shadow_frame->SetNotifyPop(true);
      tienv->notify_frames.insert(shadow_frame);
    }
    // Make sure we will go to the interpreter and use the shadow frames.
    if (needs_instrument) {
      art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
    }
    return OK;
  } while (true);
}

}  // namespace openjdkjvmti