/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "dex_file_annotations.h"
#include "dex_file_types.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "ti_thread.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

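// Walks a thread's stack and reports Java frames to a callback. Runtime methods (stubs and
// transitions) are skipped entirely. The first 'start' Java frames are also skipped, and
// reporting stops once 'stop' frames have been delivered (stop == 0 means no limit). Each
// reported frame is passed to FrameFn as a jvmtiFrameInfo of jmethodID plus dex pc location.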
template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) = default;

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

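// Collects a thread's frames into a vector. The closure is meant to run on the target thread
// itself via a synchronous checkpoint, so the walk happens at a safe suspend point. After the
// run, start_result and stop_result hold the unconsumed parts of the skip and limit counts;
// a non-zero start_result means the stack had fewer frames than the requested start depth.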
struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

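// Copies collected frames into the caller-provided buffer, implementing the JVMTI start_depth
// convention: a non-negative start_depth counts down from the top of the stack (and was
// already applied during collection, so start_result must be zero here), while a negative
// start_depth selects the bottommost -start_depth frames. For example, start_depth == -2
// copies at most the two oldest frames.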
static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

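// Like GetStackTraceVectorClosure, but writes frames directly into the caller-provided
// buffer, avoiding the intermediate vector. This only works for non-negative start depths,
// where the frames to report are known while walking down from the top of the stack.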
struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    thread->RequestSynchronousCheckpoint(&closure);
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  }

  GetStackTraceVectorClosure closure(0, 0);
  thread->RequestSynchronousCheckpoint(&closure);

  return TranslateFrameVector(closure.frames,
                              start_depth,
                              closure.start_result,
                              max_frame_count,
                              frame_buffer,
                              count_ptr);
}
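
// Example agent usage of GetStackTrace (an illustrative sketch, not part of this file;
// assumes "jvmti" is a valid jvmtiEnv* and "thread" is an alive jthread):
//
//   jvmtiFrameInfo frames[16];
//   jint count = 0;
//   jvmtiError err = jvmti->GetStackTrace(thread, /* start_depth */ 0,
//                                         /* max_frame_count */ 16, frames, &count);
//   if (err == JVMTI_ERROR_NONE) {
//     // frames[0] is the topmost Java frame; on ART, frames[i].location is a dex pc.
//   }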

template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) OVERRIDE
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

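// Runs the closure above on every thread via a thread-list checkpoint and blocks until all
// target threads have passed the barrier. RunCheckpoint returns the number of threads the
// closure will run on; the caller then waits for that many barrier passes.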
template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count) {
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}

jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  art::Thread* current = art::Thread::Current();

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

Andreas Gampef1221a12017-06-21 21:20:47 -0700421 size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
Andreas Gampea1a27c62017-01-11 16:37:16 -0800422 alignof(jvmtiFrameInfo));
423 size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
424 unsigned char* chunk_data;
425 jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
426 if (alloc_result != ERR(NONE)) {
427 return alloc_result;
428 }
429
430 jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
431 // First copy in all the basic data.
Andreas Gampef1221a12017-06-21 21:20:47 -0700432 memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());
Andreas Gampea1a27c62017-01-11 16:37:16 -0800433
434 // Now copy the frames and fix up the pointers.
435 jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
436 chunk_data + rounded_stack_info_size);
Andreas Gampef1221a12017-06-21 21:20:47 -0700437 for (size_t i = 0; i < data.frames.size(); ++i) {
Andreas Gampea1a27c62017-01-11 16:37:16 -0800438 jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
439 jvmtiStackInfo& new_stack_info = stack_info[i];
440
Andreas Gampef1221a12017-06-21 21:20:47 -0700441 // Translate the global ref into a local ref.
442 new_stack_info.thread =
443 static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);
Andreas Gampea1a27c62017-01-11 16:37:16 -0800444
445 if (old_stack_info.frame_count > 0) {
446 // Only copy when there's data - leave the nullptr alone.
447 size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
448 memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
449 new_stack_info.frame_buffer = frame_info;
450 frame_info += old_stack_info.frame_count;
451 }
452 }
453
454 *stack_info_ptr = stack_info;
Andreas Gampef1221a12017-06-21 21:20:47 -0700455 *thread_count_ptr = static_cast<jint>(data.frames.size());
Andreas Gampea1a27c62017-01-11 16:37:16 -0800456
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700457 return ERR(NONE);
458}
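
// Example agent usage of GetAllStackTraces (an illustrative sketch; assumes "jvmti" is a
// valid jvmtiEnv*). Per the JVMTI spec, the returned block is one allocation that the
// caller releases with a single Deallocate call:
//
//   jvmtiStackInfo* stack_info = nullptr;
//   jint thread_count = 0;
//   jvmtiError err = jvmti->GetAllStackTraces(16, &stack_info, &thread_count);
//   if (err == JVMTI_ERROR_NONE) {
//     for (jint i = 0; i < thread_count; ++i) {
//       // Use stack_info[i].thread, .state, .frame_count, .frame_buffer here.
//     }
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(stack_info));
//   }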

jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);  // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer. Note that the stack-info array is
  // sized by thread_count, which may be larger than the number of threads we found frames for.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
                                        JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
// runtime methods and transitions must not be counted.
struct GetFrameCountVisitor : public art::StackVisitor {
  explicit GetFrameCountVisitor(art::Thread* thread)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        count(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      count++;
    }
    return true;
  }

  size_t count;
};

struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetFrameCountVisitor visitor(self);
    visitor.WalkStack(false);

    count = visitor.count;
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    return thread_error;
  }

  DCHECK(thread != nullptr);
  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  *count_ptr = closure.count;
  return ERR(NONE);
}

// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct GetLocationVisitor : public art::StackVisitor {
  GetLocationVisitor(art::Thread* thread, size_t n_in)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        n(n_in),
        count(0),
        caller(nullptr),
        caller_dex_pc(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      DCHECK(caller == nullptr);
      if (count == n) {
        caller = m;
        caller_dex_pc = GetDexPc(false);
        return false;
      }
      count++;
    }
    return true;
  }

  const size_t n;
  size_t count;
  art::ArtMethod* caller;
  uint32_t caller_dex_pc;
};

struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetLocationVisitor visitor(self, n);
    visitor.WalkStack(false);

    method = visitor.caller;
    dex_pc = visitor.caller_dex_pc;
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  thread->RequestSynchronousCheckpoint(&closure);

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::dex::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}
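
// Example agent usage of GetFrameCount and GetFrameLocation (an illustrative sketch;
// assumes "jvmti" is a valid jvmtiEnv* and "thread" is an alive jthread):
//
//   jint count = 0;
//   if (jvmti->GetFrameCount(thread, &count) == JVMTI_ERROR_NONE && count > 0) {
//     jmethodID method;
//     jlocation location;  // On ART: a dex pc, or -1 for native methods.
//     jvmti->GetFrameLocation(thread, /* depth */ 0, &method, &location);
//   }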

struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
  // We need a context because VisitLocks needs it to retrieve the monitor objects.
  explicit MonitorVisitor(art::Thread* thread)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      : art::StackVisitor(thread,
                          art::Context::Create(),
                          art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        hs(art::Thread::Current()),
        current_stack_depth(0) {}

  ~MonitorVisitor() {
    delete context_;
  }

  bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    if (!GetMethod()->IsRuntimeMethod()) {
      art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
      ++current_stack_depth;
    }
    return true;
  }

  static void AppendOwnedMonitors(art::mirror::Object* owned_monitor, void* arg)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
    art::ObjPtr<art::mirror::Object> mon(owned_monitor);
    // Filter out duplicates.
    for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
      if (monitor.Get() == mon.Ptr()) {
        return;
      }
    }
    visitor->monitors.push_back(visitor->hs.NewHandle(mon));
    visitor->stack_depths.push_back(visitor->current_stack_depth);
  }

  void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (const art::Handle<art::mirror::Object>& m : monitors) {
      if (m.Get() == obj) {
        return;
      }
    }
    monitors.push_back(hs.NewHandle(obj));
    stack_depths.push_back(-1);
  }

  art::VariableSizedHandleScope hs;
  jint current_stack_depth;
  std::vector<art::Handle<art::mirror::Object>> monitors;
  std::vector<jint> stack_depths;
};

template<typename Fn>
struct MonitorInfoClosure : public art::Closure {
 public:
  MonitorInfoClosure(art::ScopedObjectAccess& soa, Fn handle_results)
      : soa_(soa), err_(OK), handle_results_(handle_results) {}

  void Run(art::Thread* target) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    // Find the monitors on the stack.
    MonitorVisitor visitor(target);
    visitor.WalkStack(/* include_transitions */ false);
    // Find any other monitors, including ones acquired in native code.
    art::RootInfo root_info(art::kRootVMInternal);
    target->GetJniEnv()->monitors.VisitRoots(&visitor, root_info);
    err_ = handle_results_(soa_, visitor);
  }

  jvmtiError GetError() {
    return err_;
  }

 private:
  art::ScopedObjectAccess& soa_;
  jvmtiError err_;
  Fn handle_results_;
};

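// Shared implementation for the two GetOwnedMonitor* entry points below: run
// MonitorInfoClosure on the target thread (via a synchronous checkpoint if it is another
// thread, directly if it is the current one) and let handle_results translate the collected
// monitors into the caller's output format.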
template <typename Fn>
static jvmtiError GetOwnedMonitorInfoCommon(jthread thread, Fn handle_results) {
  art::Thread* self = art::Thread::Current();
  art::ScopedObjectAccess soa(self);
  MonitorInfoClosure<Fn> closure(soa, handle_results);
  bool called_method = false;
  {
    art::MutexLock mu(self, *art::Locks::thread_list_lock_);
    art::Thread* target = nullptr;
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      return err;
    }
    if (target != self) {
      called_method = true;
      if (!target->RequestSynchronousCheckpoint(&closure)) {
        return ERR(THREAD_NOT_ALIVE);
      }
    }
  }
  // We cannot call the closure on the current thread while holding the thread_list_lock, since
  // the closure needs to call into the verifier, which can cause the current thread to suspend
  // for GC. Suspending would be a bad thing to do while holding the ThreadListLock. Other
  // threads are fine, since the closure runs at a checkpoint, but for the current thread we
  // must drop the mutex first.
  if (!called_method) {
    closure.Run(self);
  }
  return closure.GetError();
}

jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
                                                    jthread thread,
                                                    jint* info_cnt,
                                                    jvmtiMonitorStackDepthInfo** info_ptr) {
  if (info_cnt == nullptr || info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * visitor.monitors.size();
    jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
    if (err != OK) {
      return err;
    }
    *info_cnt = visitor.monitors.size();
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      (*info_ptr)[i] = {
        soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get()),
        visitor.stack_depths[i]
      };
    }
    return OK;
  };
  return GetOwnedMonitorInfoCommon(thread, handle_fun);
}

jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                          jthread thread,
                                          jint* owned_monitor_count_ptr,
                                          jobject** owned_monitors_ptr) {
  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto nbytes = sizeof(jobject) * visitor.monitors.size();
    jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
    if (err != OK) {
      return err;
    }
    *owned_monitor_count_ptr = visitor.monitors.size();
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      (*owned_monitors_ptr)[i] =
          soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get());
    }
    return OK;
  };
  return GetOwnedMonitorInfoCommon(thread, handle_fun);
}

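// Example agent usage of GetOwnedMonitorInfo (an illustrative sketch; assumes "jvmti" is a
// valid jvmtiEnv* with the can_get_owned_monitor_info capability):
//
//   jint monitor_count = 0;
//   jobject* monitors = nullptr;
//   jvmtiError err = jvmti->GetOwnedMonitorInfo(thread, &monitor_count, &monitors);
//   if (err == JVMTI_ERROR_NONE) {
//     // monitors[0 .. monitor_count) are local references to the owned monitors.
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(monitors));
//   }

// Records a frame-pop notification request for the frame at 'depth'. ART implements this by
// forcing the frame into the interpreter: a debugger shadow frame is attached (creating one
// if needed), marked with the notify-pop bit, and the thread's stack is instrumented so
// execution actually reaches that shadow frame.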
jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
  art::Thread* self = art::Thread::Current();
  art::Thread* target;
  do {
    ThreadUtil::SuspendCheck(self);
    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
    // Make sure we won't be suspended in the middle of holding the thread_suspend_count_lock_ by
    // a user-code suspension. We retry and do another SuspendCheck to clear this.
    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
      continue;
    }
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
    // have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      return err;
    }
    if (target != self) {
      // TODO This is part of the spec but we could easily avoid needing to do it. We would just
      // put all the logic into a sync-checkpoint.
      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
      if (target->GetUserCodeSuspendCount() == 0) {
        return ERR(THREAD_NOT_SUSPENDED);
      }
    }
    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we
    // are done (unless it's 'self' in which case we don't care since we aren't going to be
    // returning).
    // TODO We could implement this using a synchronous checkpoint and not bother with any of the
    // suspension stuff. The spec does specifically say to return THREAD_NOT_SUSPENDED though.
    // Find the requested stack frame.
    std::unique_ptr<art::Context> context(art::Context::Create());
    FindFrameAtDepthVisitor visitor(target, context.get(), depth);
    visitor.WalkStack();
    if (!visitor.FoundFrame()) {
      return ERR(NO_MORE_FRAMES);
    }
    art::ArtMethod* method = visitor.GetMethod();
    if (method->IsNative()) {
      return ERR(OPAQUE_FRAME);
    }
    // From here we are sure to succeed.
    bool needs_instrument = false;
    // Get/create a shadow frame.
    art::ShadowFrame* shadow_frame = visitor.GetCurrentShadowFrame();
    if (shadow_frame == nullptr) {
      needs_instrument = true;
      const size_t frame_id = visitor.GetFrameId();
      const uint16_t num_regs = method->GetCodeItem()->registers_size_;
      shadow_frame = target->FindOrCreateDebuggerShadowFrame(frame_id,
                                                             num_regs,
                                                             method,
                                                             visitor.GetDexPc());
    }
    // Mark the shadow frame as needs_notify_pop_.
    shadow_frame->SetNotifyPop(true);
    tienv->notify_frames.insert(shadow_frame);
    // Make sure we will go to the interpreter and use the shadow frames.
    if (needs_instrument) {
      art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
    }
    return OK;
  } while (true);
}

}  // namespace openjdkjvmti