/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "dex_file_annotations.h"
#include "dex_file_types.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "ti_thread.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

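// Walks the Java stack of a thread and reports one jvmtiFrameInfo per frame to the given
// callable. Runtime methods are skipped; 'start' frames are dropped before reporting begins,
// and the walk stops once 'stop' frames have been reported (0 means no limit).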
template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) = default;

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

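// Checkpoint closure that collects the checkpointed thread's frames into a vector. The leftover
// 'start'/'stop' counts are exposed so callers can detect stacks shorter than requested.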
struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

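// Copies collected frames into the caller-provided frame_buffer. A non-negative start_depth
// means the frames were already trimmed during collection; a negative start_depth selects up to
// |start_depth| bottom-most frames, per the JVMTI GetStackTrace contract.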
static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

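// Checkpoint closure that writes frames straight into a caller-provided buffer, avoiding the
// intermediate vector. Only usable for non-negative start depths, where at most stop_input
// frames are written.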
struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

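// Resolves a jthread to the native art::Thread. A null java_thread means the current thread.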
static jvmtiError GetThread(JNIEnv* env,
                            art::ScopedObjectAccessAlreadyRunnable& soa,
                            jthread java_thread,
                            art::Thread** thread)
    REQUIRES_SHARED(art::Locks::mutator_lock_)  // Needed for FromManagedThread.
    REQUIRES(art::Locks::thread_list_lock_) {   // Needed for FromManagedThread.
  if (java_thread == nullptr) {
    *thread = art::Thread::Current();
    if (*thread == nullptr) {
      // GetStackTrace can only be run during the live phase, so the current thread should be
      // attached and thus available. Getting a null for current means we're starting up or
      // dying.
      return ERR(WRONG_PHASE);
    }
  } else {
    if (!env->IsInstanceOf(java_thread, art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }

    // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
    *thread = art::Thread::FromManagedThread(soa, java_thread);
    if (*thread == nullptr) {
      return ERR(THREAD_NOT_ALIVE);
    }
  }
  return ERR(NONE);
}

jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting ||
      state == art::ThreadState::kTerminated ||
      thread->IsStillStarting()) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    thread->RequestSynchronousCheckpoint(&closure);
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  }

  GetStackTraceVectorClosure closure(0, 0);
  thread->RequestSynchronousCheckpoint(&closure);

  return TranslateFrameVector(closure.frames,
                              start_depth,
                              closure.start_result,
                              max_frame_count,
                              frame_buffer,
                              count_ptr);
}

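// A minimal sketch of how an agent might call the function above (illustrative only; 'jvmti' is
// assumed to be a valid jvmtiEnv* obtained via GetEnv, and kMaxFrames is a hypothetical
// constant):
//
//   jvmtiFrameInfo frames[kMaxFrames];
//   jint count = 0;
//   jvmtiError err = jvmti->GetStackTrace(nullptr /* current thread */,
//                                         0 /* start_depth */,
//                                         kMaxFrames,
//                                         frames,
//                                         &count);

// Checkpoint closure run on every thread for GetAllStackTraces/GetThreadListStackTraces. Each
// thread collects its own frames into storage handed out by the Data policy object, then passes
// the barrier so the caller knows when all threads are done.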
template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) OVERRIDE
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

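// Runs the closure on all threads via a checkpoint and blocks until every thread has passed the
// barrier.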
template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count) {
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}

jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  art::Thread* current = art::Thread::Current();

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}

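// A minimal usage sketch (illustrative only; 'jvmti' is an assumed jvmtiEnv*). Per the JVMTI
// spec the single chunk returned above must be released with Deallocate:
//
//   jvmtiStackInfo* stack_info = nullptr;
//   jint thread_count = 0;
//   jvmtiError err = jvmti->GetAllStackTraces(10 /* max_frame_count */,
//                                             &stack_info,
//                                             &thread_count);
//   if (err == JVMTI_ERROR_NONE) {
//     // ... inspect stack_info[0 .. thread_count) ...
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(stack_info));
//   }
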
jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);  // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer. Note that this is not frames.size(),
  // potentially.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
          JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
// runtime methods and transitions must not be counted.
struct GetFrameCountVisitor : public art::StackVisitor {
  explicit GetFrameCountVisitor(art::Thread* thread)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        count(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      count++;
    }
    return true;
  }

  size_t count;
};

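// Checkpoint closure so the count is taken on the target thread's own stack.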
struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetFrameCountVisitor visitor(self);
    visitor.WalkStack(false);

    count = visitor.count;
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);

  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  thread->RequestSynchronousCheckpoint(&closure);

  *count_ptr = closure.count;
  return ERR(NONE);
}

// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct GetLocationVisitor : public art::StackVisitor {
  GetLocationVisitor(art::Thread* thread, size_t n_in)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        n(n_in),
        count(0),
        caller(nullptr),
        caller_dex_pc(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      DCHECK(caller == nullptr);
      if (count == n) {
        caller = m;
        caller_dex_pc = GetDexPc(false);
        return false;
      }
      count++;
    }
    return true;
  }

  const size_t n;
  size_t count;
  art::ArtMethod* caller;
  uint32_t caller_dex_pc;
};

struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetLocationVisitor visitor(self, n);
    visitor.WalkStack(false);

    method = visitor.caller;
    dex_pc = visitor.caller_dex_pc;
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  thread->RequestSynchronousCheckpoint(&closure);

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::dex::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}

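// Collects all monitors owned by a thread: those locked by Java frames (via VisitLocks) and,
// through the SingleRootVisitor interface, those acquired in native code via JNI MonitorEnter.
// A stack depth of -1 marks monitors not attributable to a Java frame.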
struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
  // We need a context because VisitLocks needs it to retrieve the monitor objects.
  explicit MonitorVisitor(art::Thread* thread)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      : art::StackVisitor(thread,
                          art::Context::Create(),
                          art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        hs(art::Thread::Current()),
        current_stack_depth(0) {}

  ~MonitorVisitor() {
    delete context_;
  }

  bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    if (!GetMethod()->IsRuntimeMethod()) {
      art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
      ++current_stack_depth;
    }
    return true;
  }

  static void AppendOwnedMonitors(art::mirror::Object* owned_monitor, void* arg)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
    art::ObjPtr<art::mirror::Object> mon(owned_monitor);
    // Filter out duplicates.
    for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
      if (monitor.Get() == mon.Ptr()) {
        return;
      }
    }
    visitor->monitors.push_back(visitor->hs.NewHandle(mon));
    visitor->stack_depths.push_back(visitor->current_stack_depth);
  }

  void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (const art::Handle<art::mirror::Object>& m : monitors) {
      if (m.Get() == obj) {
        return;
      }
    }
    monitors.push_back(hs.NewHandle(obj));
    stack_depths.push_back(-1);
  }

  art::VariableSizedHandleScope hs;
  jint current_stack_depth;
  std::vector<art::Handle<art::mirror::Object>> monitors;
  std::vector<jint> stack_depths;
};

template<typename Fn>
struct MonitorInfoClosure : public art::Closure {
 public:
  MonitorInfoClosure(art::ScopedObjectAccess& soa, Fn handle_results)
      : soa_(soa), err_(OK), handle_results_(handle_results) {}

  void Run(art::Thread* target) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    // Find the monitors on the stack.
    MonitorVisitor visitor(target);
    visitor.WalkStack(/* include_transitions */ false);
    // Find any other monitors, including ones acquired in native code.
    art::RootInfo root_info(art::kRootVMInternal);
    target->GetJniEnv()->monitors.VisitRoots(&visitor, root_info);
    err_ = handle_results_(soa_, visitor);
  }

  jvmtiError GetError() {
    return err_;
  }

 private:
  art::ScopedObjectAccess& soa_;
  jvmtiError err_;
  Fn handle_results_;
};

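// Shared implementation for the two GetOwnedMonitor* entry points: runs MonitorInfoClosure on
// the target thread (via checkpoint, or directly if the target is the current thread) and
// forwards the handler's result.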
template <typename Fn>
static jvmtiError GetOwnedMonitorInfoCommon(jthread thread, Fn handle_results) {
  art::Thread* self = art::Thread::Current();
  art::ScopedObjectAccess soa(self);
  MonitorInfoClosure<Fn> closure(soa, handle_results);
  bool called_method = false;
  {
    art::MutexLock mu(self, *art::Locks::thread_list_lock_);
    art::Thread* target = ThreadUtil::GetNativeThread(thread, soa);
    if (target == nullptr && thread == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (target == nullptr) {
      return ERR(THREAD_NOT_ALIVE);
    }
    if (target != self) {
      called_method = true;
      if (!target->RequestSynchronousCheckpoint(&closure)) {
        return ERR(THREAD_NOT_ALIVE);
      }
    }
  }
  // Cannot call the closure on the current thread if we have thread_list_lock since we need to
  // call into the verifier which can cause the current thread to suspend for gc. Suspending would
  // be a bad thing to do if we hold the ThreadListLock. For other threads since we are running it
  // on a checkpoint we are fine but if the thread is the current one we need to drop the mutex
  // first.
  if (!called_method) {
    closure.Run(self);
  }
  return closure.GetError();
}

jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
                                                    jthread thread,
                                                    jint* info_cnt,
                                                    jvmtiMonitorStackDepthInfo** info_ptr) {
  if (info_cnt == nullptr || info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * visitor.monitors.size();
    jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
    if (err != OK) {
      return err;
    }
    *info_cnt = visitor.monitors.size();
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      (*info_ptr)[i] = {
        soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get()),
        visitor.stack_depths[i]
      };
    }
    return OK;
  };
  return GetOwnedMonitorInfoCommon(thread, handle_fun);
}

jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                          jthread thread,
                                          jint* owned_monitor_count_ptr,
                                          jobject** owned_monitors_ptr) {
  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto nbytes = sizeof(jobject) * visitor.monitors.size();
    jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
    if (err != OK) {
      return err;
    }
    *owned_monitor_count_ptr = visitor.monitors.size();
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      (*owned_monitors_ptr)[i] =
          soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get());
    }
    return OK;
  };
  return GetOwnedMonitorInfoCommon(thread, handle_fun);
}

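// Ensures a FramePop event will be sent when the frame at 'depth' returns, by forcing the frame
// into the interpreter (via a shadow frame) and marking it for pop notification.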
jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
  art::Thread* self = art::Thread::Current();
  art::Thread* target;
  do {
    ThreadUtil::SuspendCheck(self);
    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
    // Make sure we won't be suspended in the middle of holding the thread_suspend_count_lock_ by a
    // user-code suspension. We retry and do another SuspendCheck to clear this.
    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
      continue;
    }
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
    // have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    target = ThreadUtil::GetNativeThread(thread, soa);
    if (target == nullptr) {
      return ERR(THREAD_NOT_ALIVE);
    } else if (target != self) {
      // TODO This is part of the spec but we could easily avoid needing to do it. We would just
      // put all the logic into a sync-checkpoint.
      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
      if (target->GetUserCodeSuspendCount() == 0) {
        return ERR(THREAD_NOT_SUSPENDED);
      }
    }
    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we
    // are done (unless it's 'self' in which case we don't care since we aren't going to be
    // returning).
    // TODO We could implement this using a synchronous checkpoint and not bother with any of the
    // suspension stuff. The spec does specifically say to return THREAD_NOT_SUSPENDED though.
    // Find the requested stack frame.
    std::unique_ptr<art::Context> context(art::Context::Create());
    FindFrameAtDepthVisitor visitor(target, context.get(), depth);
    visitor.WalkStack();
    if (!visitor.FoundFrame()) {
      return ERR(NO_MORE_FRAMES);
    }
    art::ArtMethod* method = visitor.GetMethod();
    if (method->IsNative()) {
      return ERR(OPAQUE_FRAME);
    }
    // From here we are sure to succeed.
    bool needs_instrument = false;
    // Get/create a shadow frame.
    art::ShadowFrame* shadow_frame = visitor.GetCurrentShadowFrame();
    if (shadow_frame == nullptr) {
      needs_instrument = true;
      const size_t frame_id = visitor.GetFrameId();
      const uint16_t num_regs = method->GetCodeItem()->registers_size_;
      shadow_frame = target->FindOrCreateDebuggerShadowFrame(frame_id,
                                                             num_regs,
                                                             method,
                                                             visitor.GetDexPc());
    }
    // Mark the shadow frame as needs_notify_pop_.
    shadow_frame->SetNotifyPop(true);
    tienv->notify_frames.insert(shadow_frame);
    // Make sure we will go to the interpreter and use the shadow frames.
    if (needs_instrument) {
      art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
    }
    return OK;
  } while (true);
}

}  // namespace openjdkjvmti