/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "dex_file_annotations.h"
#include "handle_scope-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

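// Visits the Java stack frames of a thread, skipping runtime methods, and
// reports each frame as a jvmtiFrameInfo to the FrameFn callback: 'start'
// frames are skipped first, then up to 'stop' frames are reported (a stop of
// 0 means no limit).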
template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) = default;

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::DexFile::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

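// Checkpoint closure that collects the target thread's frames into a vector.
// After the walk, start_result/stop_result hold the unconsumed remainder of
// the requested start/stop window.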
struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

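// Copies collected frames into the caller-provided frame_buffer, following the
// JVMTI convention for start_depth: non-negative values count from the top of
// the stack, negative values count from the bottom.
// Worked example (illustrative values): with collected_frames = 5,
// start_depth = -3 and max_frame_count = 2, the copy starts at
// frames[5 + (-3)] = frames[2] and copies min(3, 2) = 2 entries,
// i.e. frames[2] and frames[3].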
static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

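// Checkpoint closure that writes frames straight into the caller-provided
// buffer, avoiding an intermediate vector. Only usable for non-negative start
// depths, where frames arrive in the same order the buffer expects.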
struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

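// Resolves java_thread to its native art::Thread, using the current thread
// when java_thread is null. Fails if the peer is not a java.lang.Thread or if
// the thread is not alive.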
static jvmtiError GetThread(JNIEnv* env,
                            art::ScopedObjectAccessAlreadyRunnable& soa,
                            jthread java_thread,
                            art::Thread** thread)
    REQUIRES_SHARED(art::Locks::mutator_lock_)  // Needed for FromManagedThread.
    REQUIRES(art::Locks::thread_list_lock_) {   // Needed for FromManagedThread.
  if (java_thread == nullptr) {
    *thread = art::Thread::Current();
    if (*thread == nullptr) {
      // GetStackTrace can only be run during the live phase, so the current thread should be
      // attached and thus available. Getting a null for current means we're starting up or
      // dying.
      return ERR(WRONG_PHASE);
    }
  } else {
    if (!env->IsInstanceOf(java_thread, art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }

    // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
    *thread = art::Thread::FromManagedThread(soa, java_thread);
    if (*thread == nullptr) {
      return ERR(THREAD_NOT_ALIVE);
    }
  }
  return ERR(NONE);
}

jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting ||
      state == art::ThreadState::kTerminated ||
      thread->IsStillStarting()) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    thread->RequestSynchronousCheckpoint(&closure);
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  }

  GetStackTraceVectorClosure closure(0, 0);
  thread->RequestSynchronousCheckpoint(&closure);

  return TranslateFrameVector(closure.frames,
                              start_depth,
                              closure.start_result,
                              max_frame_count,
                              frame_buffer,
                              count_ptr);
}
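
// Illustrative agent-side usage of the JVMTI entry point implemented above
// (hypothetical example, not part of this file): fetch up to 16 frames of the
// current thread, starting at the top of the stack.
//
//   jvmtiFrameInfo agent_frames[16];
//   jint agent_count;
//   jvmtiError agent_err = jvmti->GetStackTrace(nullptr /* current thread */,
//                                               0 /* start_depth */,
//                                               16 /* max_frame_count */,
//                                               agent_frames,
//                                               &agent_count);
//   // On JVMTI_ERROR_NONE, agent_frames[0..agent_count) are valid.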

template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) OVERRIDE
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

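// Runs the given stack-collection closure on all threads via a checkpoint and
// waits on the barrier until every checkpointed thread has executed it.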
template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count) {
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}

jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  art::Thread* current = art::Thread::Current();

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}

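// Variant of GetAllStackTraces for an explicit list of threads. List entries
// without a live native thread are reported with an empty trace and a state of
// new or terminated, derived from the peer's "started" field.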
jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);  // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }
  // No errors yet. Now put it all into an output buffer. Note that the jvmtiStackInfo array is
  // sized by thread_count, which may be larger than data.frames.size().
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
          JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
// runtime methods and transitions must not be counted.
struct GetFrameCountVisitor : public art::StackVisitor {
  explicit GetFrameCountVisitor(art::Thread* thread)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        count(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      count++;
    }
    return true;
  }

  size_t count;
};

struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetFrameCountVisitor visitor(self);
    visitor.WalkStack(false);

    count = visitor.count;
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);

  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  thread->RequestSynchronousCheckpoint(&closure);

  *count_ptr = closure.count;
  return ERR(NONE);
}

// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct GetLocationVisitor : public art::StackVisitor {
  GetLocationVisitor(art::Thread* thread, size_t n_in)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        n(n_in),
        count(0),
        caller(nullptr),
        caller_dex_pc(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      DCHECK(caller == nullptr);
      if (count == n) {
        caller = m;
        caller_dex_pc = GetDexPc(false);
        return false;
      }
      count++;
    }
    return true;
  }

  const size_t n;
  size_t count;
  art::ArtMethod* caller;
  uint32_t caller_dex_pc;
};

struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetLocationVisitor visitor(self, n);
    visitor.WalkStack(false);

    method = visitor.caller;
    dex_pc = visitor.caller_dex_pc;
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  thread->RequestSynchronousCheckpoint(&closure);

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::DexFile::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}

}  // namespace openjdkjvmti