blob: a17226c55a12e97ba17a7212e24f0bb50dff9a7a [file] [log] [blame]
Andreas Gampeb5eb94a2016-10-27 19:23:09 -07001/* Copyright (C) 2016 The Android Open Source Project
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This file implements interfaces from the file jvmti.h. This implementation
5 * is licensed under the same terms as the file jvmti.h. The
6 * copyright and license information for the file jvmti.h follows.
7 *
8 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
9 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
10 *
11 * This code is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 only, as
13 * published by the Free Software Foundation. Oracle designates this
14 * particular file as subject to the "Classpath" exception as provided
15 * by Oracle in the LICENSE file that accompanied this code.
16 *
17 * This code is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * version 2 for more details (a copy is included in the LICENSE file that
21 * accompanied this code).
22 *
23 * You should have received a copy of the GNU General Public License version
24 * 2 along with this work; if not, write to the Free Software Foundation,
25 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
26 *
27 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
28 * or visit www.oracle.com if you need additional information or have any
29 * questions.
30 */
31
32#include "ti_stack.h"
33
Andreas Gampeeba32fb2017-01-12 17:40:05 -080034#include <algorithm>
Andreas Gampea1a27c62017-01-11 16:37:16 -080035#include <list>
36#include <unordered_map>
37#include <vector>
38
Andreas Gampea1d2f952017-04-20 22:53:58 -070039#include "art_field-inl.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070040#include "art_method-inl.h"
Andreas Gampea1d2f952017-04-20 22:53:58 -070041#include "art_jvmti.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080042#include "base/bit_utils.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070043#include "base/enums.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080044#include "base/mutex.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070045#include "dex_file.h"
46#include "dex_file_annotations.h"
Andreas Gampeeba32fb2017-01-12 17:40:05 -080047#include "handle_scope-inl.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070048#include "jni_env_ext.h"
Andreas Gampe13b27842016-11-07 16:48:23 -080049#include "jni_internal.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070050#include "mirror/class.h"
51#include "mirror/dex_cache.h"
52#include "scoped_thread_state_change-inl.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080053#include "ScopedLocalRef.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070054#include "stack.h"
Andreas Gampeb486a982017-06-01 13:45:54 -070055#include "thread-current-inl.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080056#include "thread_list.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070057#include "thread_pool.h"
Andreas Gampea1d2f952017-04-20 22:53:58 -070058#include "well_known_classes.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070059
60namespace openjdkjvmti {
61
Andreas Gampe6db6b4d2017-06-12 16:36:33 -070062template <typename FrameFn>
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070063struct GetStackTraceVisitor : public art::StackVisitor {
64 GetStackTraceVisitor(art::Thread* thread_in,
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070065 size_t start_,
Andreas Gampe6db6b4d2017-06-12 16:36:33 -070066 size_t stop_,
67 FrameFn fn_)
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070068 : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
Andreas Gampe6db6b4d2017-06-12 16:36:33 -070069 fn(fn_),
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070070 start(start_),
71 stop(stop_) {}
Andreas Gampe6db6b4d2017-06-12 16:36:33 -070072 GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
73 GetStackTraceVisitor(GetStackTraceVisitor&&) = default;
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070074
75 bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
76 art::ArtMethod* m = GetMethod();
77 if (m->IsRuntimeMethod()) {
78 return true;
79 }
80
81 if (start == 0) {
82 m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
Andreas Gampe13b27842016-11-07 16:48:23 -080083 jmethodID id = art::jni::EncodeArtMethod(m);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070084
Andreas Gampe2340e3f2016-12-12 19:37:19 -080085 uint32_t dex_pc = GetDexPc(false);
86 jlong dex_location = (dex_pc == art::DexFile::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070087
Andreas Gampe2340e3f2016-12-12 19:37:19 -080088 jvmtiFrameInfo info = { id, dex_location };
Andreas Gampe6db6b4d2017-06-12 16:36:33 -070089 fn(info);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070090
91 if (stop == 1) {
92 return false; // We're done.
93 } else if (stop > 0) {
94 stop--;
95 }
96 } else {
97 start--;
98 }
99
100 return true;
101 }
102
Andreas Gampe6db6b4d2017-06-12 16:36:33 -0700103 FrameFn fn;
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700104 size_t start;
105 size_t stop;
106};
107
// Helper to construct a GetStackTraceVisitor with a deduced FrameFn type, so
// callers can pass lambdas without spelling out the template argument.
template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}
115
116struct GetStackTraceVectorClosure : public art::Closure {
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700117 public:
Andreas Gampe6db6b4d2017-06-12 16:36:33 -0700118 GetStackTraceVectorClosure(size_t start, size_t stop)
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700119 : start_input(start),
120 stop_input(stop),
121 start_result(0),
122 stop_result(0) {}
123
Andreas Gampea1a27c62017-01-11 16:37:16 -0800124 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
Andreas Gampe6db6b4d2017-06-12 16:36:33 -0700125 auto frames_fn = [&](jvmtiFrameInfo info) {
126 frames.push_back(info);
127 };
128 auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
129 visitor.WalkStack(/* include_transitions */ false);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700130
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700131 start_result = visitor.start;
132 stop_result = visitor.stop;
133 }
134
135 const size_t start_input;
136 const size_t stop_input;
137
138 std::vector<jvmtiFrameInfo> frames;
139 size_t start_result;
140 size_t stop_result;
141};
142
Andreas Gampea1a27c62017-01-11 16:37:16 -0800143static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
144 jint start_depth,
145 size_t start_result,
146 jint max_frame_count,
147 jvmtiFrameInfo* frame_buffer,
148 jint* count_ptr) {
149 size_t collected_frames = frames.size();
150
151 // Assume we're here having collected something.
152 DCHECK_GT(max_frame_count, 0);
153
154 // Frames from the top.
155 if (start_depth >= 0) {
156 if (start_result != 0) {
157 // Not enough frames.
158 return ERR(ILLEGAL_ARGUMENT);
159 }
160 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
161 if (frames.size() > 0) {
162 memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
163 }
164 *count_ptr = static_cast<jint>(frames.size());
165 return ERR(NONE);
166 }
167
168 // Frames from the bottom.
169 if (collected_frames < static_cast<size_t>(-start_depth)) {
170 return ERR(ILLEGAL_ARGUMENT);
171 }
172
173 size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
174 memcpy(frame_buffer,
175 &frames.data()[collected_frames + start_depth],
176 count * sizeof(jvmtiFrameInfo));
177 *count_ptr = static_cast<jint>(count);
178 return ERR(NONE);
179}
180
Andreas Gampe850a0fe2017-06-12 18:37:19 -0700181struct GetStackTraceDirectClosure : public art::Closure {
182 public:
183 GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
184 : frame_buffer(frame_buffer_),
185 start_input(start),
186 stop_input(stop),
187 index(0) {
188 DCHECK_GE(start_input, 0u);
189 }
190
191 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
192 auto frames_fn = [&](jvmtiFrameInfo info) {
193 frame_buffer[index] = info;
194 ++index;
195 };
196 auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
197 visitor.WalkStack(/* include_transitions */ false);
198 }
199
200 jvmtiFrameInfo* frame_buffer;
201
202 const size_t start_input;
203 const size_t stop_input;
204
205 size_t index = 0;
206};
207
Andreas Gampe28c4a232017-06-21 21:21:31 -0700208static jvmtiError GetThread(JNIEnv* env,
209 art::ScopedObjectAccessAlreadyRunnable& soa,
210 jthread java_thread,
211 art::Thread** thread)
212 REQUIRES_SHARED(art::Locks::mutator_lock_) // Needed for FromManagedThread.
213 REQUIRES(art::Locks::thread_list_lock_) { // Needed for FromManagedThread.
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800214 if (java_thread == nullptr) {
215 *thread = art::Thread::Current();
216 if (*thread == nullptr) {
217 // GetStackTrace can only be run during the live phase, so the current thread should be
218 // attached and thus available. Getting a null for current means we're starting up or
219 // dying.
220 return ERR(WRONG_PHASE);
221 }
222 } else {
223 if (!env->IsInstanceOf(java_thread, art::WellKnownClasses::java_lang_Thread)) {
224 return ERR(INVALID_THREAD);
225 }
226
227 // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800228 *thread = art::Thread::FromManagedThread(soa, java_thread);
229 if (*thread == nullptr) {
230 return ERR(THREAD_NOT_ALIVE);
231 }
232 }
233 return ERR(NONE);
234}
235
// JVMTI GetStackTrace: copies up to max_frame_count frames of java_thread's
// stack into frame_buffer, starting at start_depth (negative means "from the
// bottom"). The walk itself runs on the target thread via a synchronous
// checkpoint so the stack is consistent.
jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  // Reject threads that have not fully started or have already terminated.
  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting ||
      state == art::ThreadState::kTerminated ||
      thread->IsStillStarting()) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    thread->RequestSynchronousCheckpoint(&closure);
    *count_ptr = static_cast<jint>(closure.index);
    // Fewer frames written than the requested skip depth means the stack was
    // too shallow for start_depth.
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  }

  // Negative start_depth: collect the whole stack (no skip, no limit), then
  // let TranslateFrameVector cut out the bottom -start_depth frames.
  GetStackTraceVectorClosure closure(0, 0);
  thread->RequestSynchronousCheckpoint(&closure);

  return TranslateFrameVector(closure.frames,
                              start_depth,
                              closure.start_result,
                              max_frame_count,
                              frame_buffer,
                              count_ptr);
}
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700299
// Checkpoint closure run once per live thread by the thread list. The `Data`
// policy object supplies GetFrameStorageFor(), which decides whether the given
// thread is of interest and, if so, returns the vector its frames should be
// appended to (nullptr to skip the thread). `data->mutex` guards Data's
// internal storage; it must not be held when Run() is entered.
template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_) : stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) OVERRIDE
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();

    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    // nullptr means this thread is not part of the requested set.
    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  const size_t stop_input;
  Data* data;
};
330
// JVMTI GetAllStackTraces: collects up to max_frame_count frames for every live
// thread. Per the spec, the result is returned as one Allocate()d chunk holding
// the jvmtiStackInfo array followed by all frame arrays.
jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  // Data policy for GetAllStackTracesVectorClosure: accepts every thread, keeps
  // a global ref to each thread's peer (released in the destructor) and a
  // per-thread frame vector. `mutex` serializes the checkpoint callbacks.
  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      // Drop the global refs acquired in GetFrameStorageFor.
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      // Global ref keeps the peer alive until the data is converted; local
      // refs are handed out to the caller in the second stage below.
      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  GetAllStackTracesVectorClosure<AllStackTracesData> closure(
      static_cast<size_t>(max_frame_count), &data);
  art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);

  art::Thread* current = art::Thread::Current();

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    // Temporary per-thread frame array, owned by frame_infos; copied into the
    // final chunk below.
    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  // Layout: [jvmtiStackInfo array, padded to alignof(jvmtiFrameInfo)][all frames].
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}
468
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800469jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
470 jint thread_count,
471 const jthread* thread_list,
472 jint max_frame_count,
473 jvmtiStackInfo** stack_info_ptr) {
474 if (max_frame_count < 0) {
475 return ERR(ILLEGAL_ARGUMENT);
476 }
477 if (thread_count < 0) {
478 return ERR(ILLEGAL_ARGUMENT);
479 }
480 if (thread_count == 0) {
481 *stack_info_ptr = nullptr;
482 return ERR(NONE);
483 }
484 if (stack_info_ptr == nullptr || stack_info_ptr == nullptr) {
485 return ERR(NULL_POINTER);
486 }
487
488 art::Thread* current = art::Thread::Current();
489 art::ScopedObjectAccess soa(current); // Now we know we have the shared lock.
490
Andreas Gampef1221a12017-06-21 21:20:47 -0700491 struct SelectStackTracesData {
492 SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}
493
494 std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
495 REQUIRES_SHARED(art::Locks::mutator_lock_)
496 REQUIRES(!mutex) {
497 art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
498 for (size_t index = 0; index != handles.size(); ++index) {
499 if (peer == handles[index].Get()) {
500 // Found the thread.
501 art::MutexLock mu(self, mutex);
502
503 threads.push_back(thread);
504 thread_list_indices.push_back(index);
505
Andreas Gampead9173d2017-06-22 16:33:08 -0700506 frames.emplace_back(new std::vector<jvmtiFrameInfo>());
507 return frames.back().get();
Andreas Gampef1221a12017-06-21 21:20:47 -0700508 }
509 }
510 return nullptr;
511 }
512
513 art::Mutex mutex;
514
515 // Selection data.
516
517 std::vector<art::Handle<art::mirror::Object>> handles;
518
519 // Storage. Only access directly after completion.
520
521 std::vector<art::Thread*> threads;
522 std::vector<size_t> thread_list_indices;
523
Andreas Gampead9173d2017-06-22 16:33:08 -0700524 std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
Andreas Gampef1221a12017-06-21 21:20:47 -0700525 };
526
527 SelectStackTracesData data;
528
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800529 // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
530 art::VariableSizedHandleScope hs(current);
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800531 for (jint i = 0; i != thread_count; ++i) {
532 if (thread_list[i] == nullptr) {
533 return ERR(INVALID_THREAD);
534 }
535 if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
536 return ERR(INVALID_THREAD);
537 }
Andreas Gampef1221a12017-06-21 21:20:47 -0700538 data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800539 }
540
Andreas Gampef1221a12017-06-21 21:20:47 -0700541 GetAllStackTracesVectorClosure<SelectStackTracesData> closure(
542 static_cast<size_t>(max_frame_count), &data);
543 art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800544
545 // Convert the data into our output format.
546
547 // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
548 // allocate one big chunk for this and the actual frames, which means we need
549 // to either be conservative or rearrange things later (the latter is implemented).
Andreas Gampef1221a12017-06-21 21:20:47 -0700550 std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800551 std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
Andreas Gampef1221a12017-06-21 21:20:47 -0700552 frame_infos.reserve(data.frames.size());
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800553
554 // Now run through and add data for each thread.
555 size_t sum_frames = 0;
Andreas Gampef1221a12017-06-21 21:20:47 -0700556 for (size_t index = 0; index < data.frames.size(); ++index) {
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800557 jvmtiStackInfo& stack_info = stack_info_array.get()[index];
558 memset(&stack_info, 0, sizeof(jvmtiStackInfo));
559
Andreas Gampef1221a12017-06-21 21:20:47 -0700560 art::Thread* self = data.threads[index];
Andreas Gampead9173d2017-06-22 16:33:08 -0700561 const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800562
563 // For the time being, set the thread to null. We don't have good ScopedLocalRef
564 // infrastructure.
Nicolas Geoffrayffc8cad2017-02-10 10:59:22 +0000565 DCHECK(self->GetPeerFromOtherThread() != nullptr);
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800566 stack_info.thread = nullptr;
567 stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
568
569 size_t collected_frames = thread_frames.size();
570 if (max_frame_count == 0 || collected_frames == 0) {
571 stack_info.frame_count = 0;
572 stack_info.frame_buffer = nullptr;
573 continue;
574 }
575 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
576
577 jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
578 frame_infos.emplace_back(frame_info);
579
580 jint count;
581 jvmtiError translate_result = TranslateFrameVector(thread_frames,
582 0,
583 0,
584 static_cast<jint>(collected_frames),
585 frame_info,
586 &count);
587 DCHECK(translate_result == JVMTI_ERROR_NONE);
588 stack_info.frame_count = static_cast<jint>(collected_frames);
589 stack_info.frame_buffer = frame_info;
590 sum_frames += static_cast<size_t>(count);
591 }
592
593 // No errors, yet. Now put it all into an output buffer. Note that this is not frames.size(),
594 // potentially.
595 size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
596 alignof(jvmtiFrameInfo));
597 size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
598 unsigned char* chunk_data;
599 jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
600 if (alloc_result != ERR(NONE)) {
601 return alloc_result;
602 }
603
604 jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
605 jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
606 chunk_data + rounded_stack_info_size);
607
608 for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
609 // Check whether we found a running thread for this.
610 // Note: For simplicity, and with the expectation that the list is usually small, use a simple
611 // search. (The list is *not* sorted!)
Andreas Gampef1221a12017-06-21 21:20:47 -0700612 auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
613 if (it == data.thread_list_indices.end()) {
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800614 // No native thread. Must be new or dead. We need to fill out the stack info now.
615 // (Need to read the Java "started" field to know whether this is starting or terminated.)
616 art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
617 art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
618 art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
619 CHECK(started_field != nullptr);
620 bool started = started_field->GetBoolean(peer) != 0;
621 constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
622 constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
623 JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
624 stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
625 stack_info[i].state = started ? kTerminatedState : kStartedState;
626 stack_info[i].frame_count = 0;
627 stack_info[i].frame_buffer = nullptr;
628 } else {
629 // Had a native thread and frames.
Andreas Gampef1221a12017-06-21 21:20:47 -0700630 size_t f_index = it - data.thread_list_indices.begin();
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800631
632 jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
633 jvmtiStackInfo& new_stack_info = stack_info[i];
634
635 memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
636 new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
637 if (old_stack_info.frame_count > 0) {
638 // Only copy when there's data - leave the nullptr alone.
639 size_t frames_size =
640 static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
641 memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
642 new_stack_info.frame_buffer = frame_info;
643 frame_info += old_stack_info.frame_count;
644 }
645 }
646 }
647
Andreas Gampef1221a12017-06-21 21:20:47 -0700648 *stack_info_ptr = stack_info;
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800649
650 return ERR(NONE);
651}
652
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800653// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
654// runtime methods and transitions must not be counted.
655struct GetFrameCountVisitor : public art::StackVisitor {
656 explicit GetFrameCountVisitor(art::Thread* thread)
657 : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
658 count(0) {}
659
660 bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
661 art::ArtMethod* m = GetMethod();
662 const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
663 if (do_count) {
664 count++;
665 }
666 return true;
667 }
668
669 size_t count;
670};
671
672struct GetFrameCountClosure : public art::Closure {
673 public:
674 GetFrameCountClosure() : count(0) {}
675
676 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
677 GetFrameCountVisitor visitor(self);
678 visitor.WalkStack(false);
679
680 count = visitor.count;
681 }
682
683 size_t count;
684};
685
686jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
687 jthread java_thread,
688 jint* count_ptr) {
Andreas Gampe28c4a232017-06-21 21:21:31 -0700689 // It is not great that we have to hold these locks for so long, but it is necessary to ensure
690 // that the thread isn't dying on us.
691 art::ScopedObjectAccess soa(art::Thread::Current());
692 art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
693
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800694 art::Thread* thread;
Andreas Gampe28c4a232017-06-21 21:21:31 -0700695 jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
696 soa,
697 java_thread,
698 &thread);
699
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800700 if (thread_error != ERR(NONE)) {
701 return thread_error;
702 }
703 DCHECK(thread != nullptr);
704
705 if (count_ptr == nullptr) {
706 return ERR(NULL_POINTER);
707 }
708
709 GetFrameCountClosure closure;
710 thread->RequestSynchronousCheckpoint(&closure);
711
712 *count_ptr = closure.count;
713 return ERR(NONE);
714}
715
716// Walks up the stack 'n' callers, when used with Thread::WalkStack.
717struct GetLocationVisitor : public art::StackVisitor {
718 GetLocationVisitor(art::Thread* thread, size_t n_in)
719 : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
720 n(n_in),
721 count(0),
722 caller(nullptr),
723 caller_dex_pc(0) {}
724
725 bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
726 art::ArtMethod* m = GetMethod();
727 const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
728 if (do_count) {
729 DCHECK(caller == nullptr);
730 if (count == n) {
731 caller = m;
732 caller_dex_pc = GetDexPc(false);
733 return false;
734 }
735 count++;
736 }
737 return true;
738 }
739
740 const size_t n;
741 size_t count;
742 art::ArtMethod* caller;
743 uint32_t caller_dex_pc;
744};
745
746struct GetLocationClosure : public art::Closure {
747 public:
748 explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
749
750 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
751 GetLocationVisitor visitor(self, n);
752 visitor.WalkStack(false);
753
754 method = visitor.caller;
755 dex_pc = visitor.caller_dex_pc;
756 }
757
758 const size_t n;
759 art::ArtMethod* method;
760 uint32_t dex_pc;
761};
762
// JVMTI GetFrameLocation: reports the jmethodID and bytecode location of the
// frame at `depth` on java_thread's stack. Native frames report location -1,
// per the JVMTI spec.
jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  // Resolve the frame on the target thread at a checkpoint for a consistent stack.
  GetLocationClosure closure(static_cast<size_t>(depth));
  thread->RequestSynchronousCheckpoint(&closure);

  // A null method means the stack is shallower than the requested depth.
  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative()) {
    *location_ptr = -1;
  } else {
    // A managed method at an unknown dex pc indicates an internal error.
    if (closure.dex_pc == art::DexFile::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}
809
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700810} // namespace openjdkjvmti