blob: 550b97272d2c01045e21d3f2ad0e860a14724072 [file] [log] [blame]
Andreas Gampeb5eb94a2016-10-27 19:23:09 -07001/* Copyright (C) 2016 The Android Open Source Project
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This file implements interfaces from the file jvmti.h. This implementation
5 * is licensed under the same terms as the file jvmti.h. The
6 * copyright and license information for the file jvmti.h follows.
7 *
8 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
9 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
10 *
11 * This code is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 only, as
13 * published by the Free Software Foundation. Oracle designates this
14 * particular file as subject to the "Classpath" exception as provided
15 * by Oracle in the LICENSE file that accompanied this code.
16 *
17 * This code is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * version 2 for more details (a copy is included in the LICENSE file that
21 * accompanied this code).
22 *
23 * You should have received a copy of the GNU General Public License version
24 * 2 along with this work; if not, write to the Free Software Foundation,
25 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
26 *
27 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
28 * or visit www.oracle.com if you need additional information or have any
29 * questions.
30 */
31
32#include "ti_stack.h"
33
Andreas Gampeeba32fb2017-01-12 17:40:05 -080034#include <algorithm>
Andreas Gampea1a27c62017-01-11 16:37:16 -080035#include <list>
36#include <unordered_map>
37#include <vector>
38
Andreas Gampea1d2f952017-04-20 22:53:58 -070039#include "art_field-inl.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070040#include "art_method-inl.h"
Andreas Gampea1d2f952017-04-20 22:53:58 -070041#include "art_jvmti.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080042#include "base/bit_utils.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070043#include "base/enums.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080044#include "base/mutex.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070045#include "dex_file.h"
46#include "dex_file_annotations.h"
Andreas Gampeeba32fb2017-01-12 17:40:05 -080047#include "handle_scope-inl.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070048#include "jni_env_ext.h"
Andreas Gampe13b27842016-11-07 16:48:23 -080049#include "jni_internal.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070050#include "mirror/class.h"
51#include "mirror/dex_cache.h"
52#include "scoped_thread_state_change-inl.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080053#include "ScopedLocalRef.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070054#include "stack.h"
Andreas Gampeb486a982017-06-01 13:45:54 -070055#include "thread-current-inl.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080056#include "thread_list.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070057#include "thread_pool.h"
Andreas Gampea1d2f952017-04-20 22:53:58 -070058#include "well_known_classes.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070059
60namespace openjdkjvmti {
61
Andreas Gampe6db6b4d2017-06-12 16:36:33 -070062template <typename FrameFn>
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070063struct GetStackTraceVisitor : public art::StackVisitor {
64 GetStackTraceVisitor(art::Thread* thread_in,
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070065 size_t start_,
Andreas Gampe6db6b4d2017-06-12 16:36:33 -070066 size_t stop_,
67 FrameFn fn_)
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070068 : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
Andreas Gampe6db6b4d2017-06-12 16:36:33 -070069 fn(fn_),
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070070 start(start_),
71 stop(stop_) {}
Andreas Gampe6db6b4d2017-06-12 16:36:33 -070072 GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
73 GetStackTraceVisitor(GetStackTraceVisitor&&) = default;
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070074
75 bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
76 art::ArtMethod* m = GetMethod();
77 if (m->IsRuntimeMethod()) {
78 return true;
79 }
80
81 if (start == 0) {
82 m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
Andreas Gampe13b27842016-11-07 16:48:23 -080083 jmethodID id = art::jni::EncodeArtMethod(m);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070084
Andreas Gampe2340e3f2016-12-12 19:37:19 -080085 uint32_t dex_pc = GetDexPc(false);
86 jlong dex_location = (dex_pc == art::DexFile::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070087
Andreas Gampe2340e3f2016-12-12 19:37:19 -080088 jvmtiFrameInfo info = { id, dex_location };
Andreas Gampe6db6b4d2017-06-12 16:36:33 -070089 fn(info);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070090
91 if (stop == 1) {
92 return false; // We're done.
93 } else if (stop > 0) {
94 stop--;
95 }
96 } else {
97 start--;
98 }
99
100 return true;
101 }
102
Andreas Gampe6db6b4d2017-06-12 16:36:33 -0700103 FrameFn fn;
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700104 size_t start;
105 size_t stop;
106};
107
Andreas Gampe6db6b4d2017-06-12 16:36:33 -0700108template <typename FrameFn>
109GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
110 size_t start,
111 size_t stop,
112 FrameFn fn) {
113 return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
114}
115
116struct GetStackTraceVectorClosure : public art::Closure {
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700117 public:
Andreas Gampe6db6b4d2017-06-12 16:36:33 -0700118 GetStackTraceVectorClosure(size_t start, size_t stop)
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700119 : start_input(start),
120 stop_input(stop),
121 start_result(0),
122 stop_result(0) {}
123
Andreas Gampea1a27c62017-01-11 16:37:16 -0800124 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
Andreas Gampe6db6b4d2017-06-12 16:36:33 -0700125 auto frames_fn = [&](jvmtiFrameInfo info) {
126 frames.push_back(info);
127 };
128 auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
129 visitor.WalkStack(/* include_transitions */ false);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700130
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700131 start_result = visitor.start;
132 stop_result = visitor.stop;
133 }
134
135 const size_t start_input;
136 const size_t stop_input;
137
138 std::vector<jvmtiFrameInfo> frames;
139 size_t start_result;
140 size_t stop_result;
141};
142
Andreas Gampea1a27c62017-01-11 16:37:16 -0800143static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
144 jint start_depth,
145 size_t start_result,
146 jint max_frame_count,
147 jvmtiFrameInfo* frame_buffer,
148 jint* count_ptr) {
149 size_t collected_frames = frames.size();
150
151 // Assume we're here having collected something.
152 DCHECK_GT(max_frame_count, 0);
153
154 // Frames from the top.
155 if (start_depth >= 0) {
156 if (start_result != 0) {
157 // Not enough frames.
158 return ERR(ILLEGAL_ARGUMENT);
159 }
160 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
161 if (frames.size() > 0) {
162 memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
163 }
164 *count_ptr = static_cast<jint>(frames.size());
165 return ERR(NONE);
166 }
167
168 // Frames from the bottom.
169 if (collected_frames < static_cast<size_t>(-start_depth)) {
170 return ERR(ILLEGAL_ARGUMENT);
171 }
172
173 size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
174 memcpy(frame_buffer,
175 &frames.data()[collected_frames + start_depth],
176 count * sizeof(jvmtiFrameInfo));
177 *count_ptr = static_cast<jint>(count);
178 return ERR(NONE);
179}
180
Andreas Gampe850a0fe2017-06-12 18:37:19 -0700181struct GetStackTraceDirectClosure : public art::Closure {
182 public:
183 GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
184 : frame_buffer(frame_buffer_),
185 start_input(start),
186 stop_input(stop),
187 index(0) {
188 DCHECK_GE(start_input, 0u);
189 }
190
191 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
192 auto frames_fn = [&](jvmtiFrameInfo info) {
193 frame_buffer[index] = info;
194 ++index;
195 };
196 auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
197 visitor.WalkStack(/* include_transitions */ false);
198 }
199
200 jvmtiFrameInfo* frame_buffer;
201
202 const size_t start_input;
203 const size_t stop_input;
204
205 size_t index = 0;
206};
207
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800208static jvmtiError GetThread(JNIEnv* env, jthread java_thread, art::Thread** thread) {
209 if (java_thread == nullptr) {
210 *thread = art::Thread::Current();
211 if (*thread == nullptr) {
212 // GetStackTrace can only be run during the live phase, so the current thread should be
213 // attached and thus available. Getting a null for current means we're starting up or
214 // dying.
215 return ERR(WRONG_PHASE);
216 }
217 } else {
218 if (!env->IsInstanceOf(java_thread, art::WellKnownClasses::java_lang_Thread)) {
219 return ERR(INVALID_THREAD);
220 }
221
222 // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
223 art::ScopedObjectAccess soa(art::Thread::Current());
224 art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
225 *thread = art::Thread::FromManagedThread(soa, java_thread);
226 if (*thread == nullptr) {
227 return ERR(THREAD_NOT_ALIVE);
228 }
229 }
230 return ERR(NONE);
231}
232
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700233jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
234 jthread java_thread,
235 jint start_depth,
236 jint max_frame_count,
237 jvmtiFrameInfo* frame_buffer,
238 jint* count_ptr) {
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700239 art::Thread* thread;
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800240 jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
241 if (thread_error != ERR(NONE)) {
242 return thread_error;
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700243 }
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800244 DCHECK(thread != nullptr);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700245
246 art::ThreadState state = thread->GetState();
247 if (state == art::ThreadState::kStarting ||
248 state == art::ThreadState::kTerminated ||
249 thread->IsStillStarting()) {
250 return ERR(THREAD_NOT_ALIVE);
251 }
252
253 if (max_frame_count < 0) {
254 return ERR(ILLEGAL_ARGUMENT);
255 }
256 if (frame_buffer == nullptr || count_ptr == nullptr) {
257 return ERR(NULL_POINTER);
258 }
259
260 if (max_frame_count == 0) {
261 *count_ptr = 0;
262 return ERR(NONE);
263 }
264
Andreas Gampe850a0fe2017-06-12 18:37:19 -0700265 if (start_depth >= 0) {
266 // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
267 GetStackTraceDirectClosure closure(frame_buffer,
268 static_cast<size_t>(start_depth),
269 static_cast<size_t>(max_frame_count));
270 thread->RequestSynchronousCheckpoint(&closure);
271 *count_ptr = static_cast<jint>(closure.index);
272 if (closure.index < static_cast<size_t>(start_depth)) {
273 return ERR(ILLEGAL_ARGUMENT);
274 }
275 return ERR(NONE);
276 }
277
278 GetStackTraceVectorClosure closure(0, 0);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700279 thread->RequestSynchronousCheckpoint(&closure);
280
Andreas Gampea1a27c62017-01-11 16:37:16 -0800281 return TranslateFrameVector(closure.frames,
282 start_depth,
283 closure.start_result,
284 max_frame_count,
285 frame_buffer,
286 count_ptr);
287}
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700288
// JVMTI GetAllStackTraces: collects up to max_frame_count frames for every
// live thread and returns them in one env->Allocate'd chunk (stack infos
// followed by the frame arrays), per the JVMTI allocation contract.
jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }


  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);      // Now we know we have the shared lock.
  // Suspend-all requires first releasing our own suspend-friendly state;
  // order matters here (sts before ssa).
  art::ScopedThreadSuspension sts(current, art::kWaitingForDebuggerSuspension);
  art::ScopedSuspendAll ssa("GetAllStackTraces");

  std::vector<art::Thread*> threads;
  std::vector<std::vector<jvmtiFrameInfo>> frames;
  {
    std::list<art::Thread*> thread_list;
    {
      // Copy the list under the lock, then walk without holding it.
      art::MutexLock mu(current, *art::Locks::thread_list_lock_);
      thread_list = art::Runtime::Current()->GetThreadList()->GetList();
    }

    for (art::Thread* thread : thread_list) {
      // Skip threads that are still starting.
      if (thread->IsStillStarting()) {
        continue;
      }

      GetStackTraceVectorClosure closure(0u, static_cast<size_t>(max_frame_count));
      thread->RequestSynchronousCheckpoint(&closure);

      threads.push_back(thread);
      frames.emplace_back();
      frames.back().swap(closure.frames);
    }
  }

  // Convert the data into our output format. Note: we need to keep the threads suspended,
  // as we need to access them for their peers.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = frames[index];

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    // All threads are suspended under the ScopedSuspendAll above.
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    // Temporary per-thread buffer; owned by frame_infos until copied into the
    // final chunk below.
    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  // Layout: [jvmtiStackInfo x N][padding to alignof(jvmtiFrameInfo)][frames...].
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Safe to create the local ref now: we hold the mutator lock via soa.
    jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(
        threads[i]->GetPeerFromOtherThread());
    new_stack_info.thread = thread_peer;

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(frames.size());

  return ERR(NONE);
}
418
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800419jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
420 jint thread_count,
421 const jthread* thread_list,
422 jint max_frame_count,
423 jvmtiStackInfo** stack_info_ptr) {
424 if (max_frame_count < 0) {
425 return ERR(ILLEGAL_ARGUMENT);
426 }
427 if (thread_count < 0) {
428 return ERR(ILLEGAL_ARGUMENT);
429 }
430 if (thread_count == 0) {
431 *stack_info_ptr = nullptr;
432 return ERR(NONE);
433 }
434 if (stack_info_ptr == nullptr || stack_info_ptr == nullptr) {
435 return ERR(NULL_POINTER);
436 }
437
438 art::Thread* current = art::Thread::Current();
439 art::ScopedObjectAccess soa(current); // Now we know we have the shared lock.
440
441 // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
442 art::VariableSizedHandleScope hs(current);
443 std::vector<art::Handle<art::mirror::Object>> handles;
444 for (jint i = 0; i != thread_count; ++i) {
445 if (thread_list[i] == nullptr) {
446 return ERR(INVALID_THREAD);
447 }
448 if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
449 return ERR(INVALID_THREAD);
450 }
451 handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
452 }
453
454 std::vector<art::Thread*> threads;
455 std::vector<size_t> thread_list_indices;
456 std::vector<std::vector<jvmtiFrameInfo>> frames;
457
458 {
459 art::ScopedThreadSuspension sts(current, art::kWaitingForDebuggerSuspension);
460 art::ScopedSuspendAll ssa("GetThreadListStackTraces");
461
462 {
463 std::list<art::Thread*> art_thread_list;
464 {
465 art::MutexLock mu(current, *art::Locks::thread_list_lock_);
466 art_thread_list = art::Runtime::Current()->GetThreadList()->GetList();
467 }
468
469 for (art::Thread* thread : art_thread_list) {
470 if (thread->IsStillStarting()) {
471 // Skip this. We can't get the jpeer, and if it is for a thread in the thread_list,
472 // we'll just report STARTING.
473 continue;
474 }
475
476 // Get the peer, and check whether we know it.
Andreas Gampe202f85a2017-02-06 10:23:26 -0800477 art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800478 for (size_t index = 0; index != handles.size(); ++index) {
479 if (peer == handles[index].Get()) {
480 // Found the thread.
Andreas Gampe6db6b4d2017-06-12 16:36:33 -0700481 GetStackTraceVectorClosure closure(0u, static_cast<size_t>(max_frame_count));
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800482 thread->RequestSynchronousCheckpoint(&closure);
483
484 threads.push_back(thread);
485 thread_list_indices.push_back(index);
486 frames.emplace_back();
487 frames.back().swap(closure.frames);
488
489 continue;
490 }
491 }
492
493 // Must be not started, or dead. We'll deal with it at the end.
494 }
495 }
496 }
497
498 // Convert the data into our output format.
499
500 // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
501 // allocate one big chunk for this and the actual frames, which means we need
502 // to either be conservative or rearrange things later (the latter is implemented).
503 std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[frames.size()]);
504 std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
505 frame_infos.reserve(frames.size());
506
507 // Now run through and add data for each thread.
508 size_t sum_frames = 0;
509 for (size_t index = 0; index < frames.size(); ++index) {
510 jvmtiStackInfo& stack_info = stack_info_array.get()[index];
511 memset(&stack_info, 0, sizeof(jvmtiStackInfo));
512
513 art::Thread* self = threads[index];
514 const std::vector<jvmtiFrameInfo>& thread_frames = frames[index];
515
516 // For the time being, set the thread to null. We don't have good ScopedLocalRef
517 // infrastructure.
Nicolas Geoffrayffc8cad2017-02-10 10:59:22 +0000518 DCHECK(self->GetPeerFromOtherThread() != nullptr);
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800519 stack_info.thread = nullptr;
520 stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
521
522 size_t collected_frames = thread_frames.size();
523 if (max_frame_count == 0 || collected_frames == 0) {
524 stack_info.frame_count = 0;
525 stack_info.frame_buffer = nullptr;
526 continue;
527 }
528 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
529
530 jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
531 frame_infos.emplace_back(frame_info);
532
533 jint count;
534 jvmtiError translate_result = TranslateFrameVector(thread_frames,
535 0,
536 0,
537 static_cast<jint>(collected_frames),
538 frame_info,
539 &count);
540 DCHECK(translate_result == JVMTI_ERROR_NONE);
541 stack_info.frame_count = static_cast<jint>(collected_frames);
542 stack_info.frame_buffer = frame_info;
543 sum_frames += static_cast<size_t>(count);
544 }
545
546 // No errors, yet. Now put it all into an output buffer. Note that this is not frames.size(),
547 // potentially.
548 size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
549 alignof(jvmtiFrameInfo));
550 size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
551 unsigned char* chunk_data;
552 jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
553 if (alloc_result != ERR(NONE)) {
554 return alloc_result;
555 }
556
557 jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
558 jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
559 chunk_data + rounded_stack_info_size);
560
561 for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
562 // Check whether we found a running thread for this.
563 // Note: For simplicity, and with the expectation that the list is usually small, use a simple
564 // search. (The list is *not* sorted!)
565 auto it = std::find(thread_list_indices.begin(), thread_list_indices.end(), i);
566 if (it == thread_list_indices.end()) {
567 // No native thread. Must be new or dead. We need to fill out the stack info now.
568 // (Need to read the Java "started" field to know whether this is starting or terminated.)
569 art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
570 art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
571 art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
572 CHECK(started_field != nullptr);
573 bool started = started_field->GetBoolean(peer) != 0;
574 constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
575 constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
576 JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
577 stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
578 stack_info[i].state = started ? kTerminatedState : kStartedState;
579 stack_info[i].frame_count = 0;
580 stack_info[i].frame_buffer = nullptr;
581 } else {
582 // Had a native thread and frames.
583 size_t f_index = it - thread_list_indices.begin();
584
585 jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
586 jvmtiStackInfo& new_stack_info = stack_info[i];
587
588 memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
589 new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
590 if (old_stack_info.frame_count > 0) {
591 // Only copy when there's data - leave the nullptr alone.
592 size_t frames_size =
593 static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
594 memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
595 new_stack_info.frame_buffer = frame_info;
596 frame_info += old_stack_info.frame_count;
597 }
598 }
599 }
600
601 * stack_info_ptr = stack_info;
602
603 return ERR(NONE);
604}
605
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800606// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
607// runtime methods and transitions must not be counted.
608struct GetFrameCountVisitor : public art::StackVisitor {
609 explicit GetFrameCountVisitor(art::Thread* thread)
610 : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
611 count(0) {}
612
613 bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
614 art::ArtMethod* m = GetMethod();
615 const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
616 if (do_count) {
617 count++;
618 }
619 return true;
620 }
621
622 size_t count;
623};
624
625struct GetFrameCountClosure : public art::Closure {
626 public:
627 GetFrameCountClosure() : count(0) {}
628
629 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
630 GetFrameCountVisitor visitor(self);
631 visitor.WalkStack(false);
632
633 count = visitor.count;
634 }
635
636 size_t count;
637};
638
639jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
640 jthread java_thread,
641 jint* count_ptr) {
642 art::Thread* thread;
643 jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
644 if (thread_error != ERR(NONE)) {
645 return thread_error;
646 }
647 DCHECK(thread != nullptr);
648
649 if (count_ptr == nullptr) {
650 return ERR(NULL_POINTER);
651 }
652
653 GetFrameCountClosure closure;
654 thread->RequestSynchronousCheckpoint(&closure);
655
656 *count_ptr = closure.count;
657 return ERR(NONE);
658}
659
660// Walks up the stack 'n' callers, when used with Thread::WalkStack.
661struct GetLocationVisitor : public art::StackVisitor {
662 GetLocationVisitor(art::Thread* thread, size_t n_in)
663 : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
664 n(n_in),
665 count(0),
666 caller(nullptr),
667 caller_dex_pc(0) {}
668
669 bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
670 art::ArtMethod* m = GetMethod();
671 const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
672 if (do_count) {
673 DCHECK(caller == nullptr);
674 if (count == n) {
675 caller = m;
676 caller_dex_pc = GetDexPc(false);
677 return false;
678 }
679 count++;
680 }
681 return true;
682 }
683
684 const size_t n;
685 size_t count;
686 art::ArtMethod* caller;
687 uint32_t caller_dex_pc;
688};
689
690struct GetLocationClosure : public art::Closure {
691 public:
692 explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
693
694 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
695 GetLocationVisitor visitor(self, n);
696 visitor.WalkStack(false);
697
698 method = visitor.caller;
699 dex_pc = visitor.caller_dex_pc;
700 }
701
702 const size_t n;
703 art::ArtMethod* method;
704 uint32_t dex_pc;
705};
706
707jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
708 jthread java_thread,
709 jint depth,
710 jmethodID* method_ptr,
711 jlocation* location_ptr) {
712 art::Thread* thread;
713 jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
714 if (thread_error != ERR(NONE)) {
715 return thread_error;
716 }
717 DCHECK(thread != nullptr);
718
719 if (depth < 0) {
720 return ERR(ILLEGAL_ARGUMENT);
721 }
722 if (method_ptr == nullptr || location_ptr == nullptr) {
723 return ERR(NULL_POINTER);
724 }
725
726 GetLocationClosure closure(static_cast<size_t>(depth));
727 thread->RequestSynchronousCheckpoint(&closure);
728
729 if (closure.method == nullptr) {
730 return ERR(NO_MORE_FRAMES);
731 }
732
733 *method_ptr = art::jni::EncodeArtMethod(closure.method);
734 if (closure.method->IsNative()) {
735 *location_ptr = -1;
736 } else {
737 if (closure.dex_pc == art::DexFile::kDexNoIndex) {
738 return ERR(INTERNAL);
739 }
740 *location_ptr = static_cast<jlocation>(closure.dex_pc);
741 }
742
743 return ERR(NONE);
744}
745
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700746} // namespace openjdkjvmti