/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "jni/jni_env_ext.h"
#include "jni/jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "ti_thread.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

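// Visits Java stack frames from the top, emitting one jvmtiFrameInfo per frame through the
// provided callback. The first 'start' Java frames are skipped and 'stop' limits how many frames
// are reported (0 means unlimited). Runtime methods (stubs and transitions) are never reported.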
template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) noexcept = default;

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

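// Returns the interpreter shadow frame for the visitor's current stack frame, materializing a
// debugger shadow frame (keyed by the frame id) if the frame is currently running compiled code.
// '*created_frame' tells the caller whether a new frame had to be created.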
art::ShadowFrame* FindFrameAtDepthVisitor::GetOrCreateShadowFrame(bool* created_frame) {
  art::ShadowFrame* cur = GetCurrentShadowFrame();
  if (cur == nullptr) {
    *created_frame = true;
    art::ArtMethod* method = GetMethod();
    const uint16_t num_regs = method->DexInstructionData().RegistersSize();
    cur = GetThread()->FindOrCreateDebuggerShadowFrame(GetFrameId(),
                                                       num_regs,
                                                       method,
                                                       GetDexPc());
    DCHECK(cur != nullptr);
  } else {
    *created_frame = false;
  }
  return cur;
}

template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

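// Closure flavor used for the negative-start_depth ("from the bottom") path of GetStackTrace:
// it collects every matching frame into a growable vector so the caller can slice it afterwards.
// start_result/stop_result expose how much of the requested skip/limit was left unconsumed.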
struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

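// Copies collected frames into the caller-provided buffer, implementing the JVMTI start_depth
// convention: a non-negative start_depth counts from the top of the stack (and must have been
// fully consumed during the walk), while a negative start_depth means "the last -start_depth
// frames", taken from the end of the collected vector.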
static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

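// Closure flavor for the non-negative start_depth fast path: frames are written straight into
// the caller's jvmtiFrameInfo buffer, avoiding the intermediate vector and the final memcpy.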
struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

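// For orientation, a minimal sketch of agent-side usage of the entry point below; "jvmti" and
// "thread" are assumed to be a valid jvmtiEnv* and an alive jthread obtained elsewhere by the
// agent:
//
//   jvmtiFrameInfo frames[16];
//   jint count = 0;
//   jvmtiError err = jvmti->GetStackTrace(thread, /* start_depth */ 0, /* max_frame_count */ 16,
//                                         frames, &count);
//   if (err == JVMTI_ERROR_NONE && count >= 1) {
//     // frames[0].method and frames[0].location identify the topmost Java frame.
//   }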
jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  } else {
    GetStackTraceVectorClosure closure(0, 0);
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }

    return TranslateFrameVector(closure.frames,
                                start_depth,
                                closure.start_result,
                                max_frame_count,
                                frame_buffer,
                                count_ptr);
  }
}

template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) override
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

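// Runs the closure above on all threads via a thread-list checkpoint and blocks until every
// checkpointed thread has passed the barrier, so the per-thread frame vectors in 'data' are
// complete when this returns.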
template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  // Note: requires the mutator lock as the checkpoint requires the mutator lock.
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}

jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  art::Thread* current = art::Thread::Current();
  {
    art::ScopedObjectAccess soa(current);
    RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
  }

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}

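// Unlike GetAllStackTraces, this variant only reports the threads the agent asked for: the
// checkpoint matches each native thread against the handle-scope-protected java.lang.Thread
// peers decoded from thread_list, and entries without a native counterpart (new or terminated
// threads) are filled in afterwards from the Java-side "started" field.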
jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);  // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer. Note that the stack-info array is
  // sized by thread_count here, which may differ from data.frames.size().
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
          JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
// runtime methods and transitions must not be counted.
struct GetFrameCountVisitor : public art::StackVisitor {
  explicit GetFrameCountVisitor(art::Thread* thread)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        count(0) {}

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      count++;
    }
    return true;
  }

  size_t count;
};

struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetFrameCountVisitor visitor(self);
    visitor.WalkStack(false);

    count = visitor.count;
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }

  DCHECK(thread != nullptr);
  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  *count_ptr = closure.count;
  return ERR(NONE);
}

// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct GetLocationVisitor : public art::StackVisitor {
  GetLocationVisitor(art::Thread* thread, size_t n_in)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        n(n_in),
        count(0),
        caller(nullptr),
        caller_dex_pc(0) {}

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      DCHECK(caller == nullptr);
      if (count == n) {
        caller = m;
        caller_dex_pc = GetDexPc(false);
        return false;
      }
      count++;
    }
    return true;
  }

  const size_t n;
  size_t count;
  art::ArtMethod* caller;
  uint32_t caller_dex_pc;
};

struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetLocationVisitor visitor(self, n);
    visitor.WalkStack(false);

    method = visitor.caller;
    dex_pc = visitor.caller_dex_pc;
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (depth < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::dex::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}

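// Collects the monitors owned by a thread. The class is both a StackVisitor (monitors locked by
// Java frames, found via Monitor::VisitLocks and recorded with their stack depth) and a
// SingleRootVisitor (monitors acquired through JNI, recorded with depth -1).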
struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
  // We need a context because VisitLocks needs it to retrieve the monitor objects.
  explicit MonitorVisitor(art::Thread* thread)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      : art::StackVisitor(thread,
                          art::Context::Create(),
                          art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        hs(art::Thread::Current()),
        current_stack_depth(0) {}

  ~MonitorVisitor() {
    delete context_;
  }

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    if (!GetMethod()->IsRuntimeMethod()) {
      art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
      ++current_stack_depth;
    }
    return true;
  }

  static void AppendOwnedMonitors(art::mirror::Object* owned_monitor, void* arg)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
    art::ObjPtr<art::mirror::Object> mon(owned_monitor);
    // Filter out duplicates.
    for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
      if (monitor.Get() == mon.Ptr()) {
        return;
      }
    }
    visitor->monitors.push_back(visitor->hs.NewHandle(mon));
    visitor->stack_depths.push_back(visitor->current_stack_depth);
  }

  void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (const art::Handle<art::mirror::Object>& m : monitors) {
      if (m.Get() == obj) {
        return;
      }
    }
    monitors.push_back(hs.NewHandle(obj));
    stack_depths.push_back(-1);
  }

  art::VariableSizedHandleScope hs;
  jint current_stack_depth;
  std::vector<art::Handle<art::mirror::Object>> monitors;
  std::vector<jint> stack_depths;
};

template<typename Fn>
struct MonitorInfoClosure : public art::Closure {
 public:
  explicit MonitorInfoClosure(Fn handle_results)
      : err_(OK), handle_results_(handle_results) {}

  void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    // Find the monitors on the stack.
    MonitorVisitor visitor(target);
    visitor.WalkStack(/* include_transitions= */ false);
    // Find any other monitors, including ones acquired in native code.
    art::RootInfo root_info(art::kRootVMInternal);
    target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
    err_ = handle_results_(visitor);
  }

  jvmtiError GetError() {
    return err_;
  }

 private:
  jvmtiError err_;
  Fn handle_results_;
};

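// Shared implementation for GetOwnedMonitorInfo and GetOwnedMonitorStackDepthInfo: runs the
// MonitorInfoClosure either directly (when inspecting the current thread) or via a synchronous
// checkpoint on the target, then lets 'handle_results' convert the visitor's findings.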
template <typename Fn>
static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
                                            jthread thread,
                                            Fn handle_results)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  art::Thread* self = art::Thread::Current();
  MonitorInfoClosure<Fn> closure(handle_results);
  bool called_method = false;
  {
    art::Locks::thread_list_lock_->ExclusiveLock(self);
    art::Thread* target = nullptr;
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
      return err;
    }
    if (target != self) {
      called_method = true;
      // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
      // Since this deals with object references we need to avoid going to sleep.
      art::ScopedAssertNoThreadSuspension sants("Getting owned monitor usage");
      if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
        return ERR(THREAD_NOT_ALIVE);
      }
    } else {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    }
  }
  // Cannot call the closure on the current thread if we have thread_list_lock since we need to
  // call into the verifier which can cause the current thread to suspend for gc. Suspending would
  // be a bad thing to do if we hold the ThreadListLock. For other threads since we are running it
  // on a checkpoint we are fine but if the thread is the current one we need to drop the mutex
  // first.
  if (!called_method) {
    closure.Run(self);
  }
  return closure.GetError();
}

jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
                                                    jthread thread,
                                                    jint* info_cnt,
                                                    jvmtiMonitorStackDepthInfo** info_ptr) {
  if (info_cnt == nullptr || info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  std::vector<uint32_t> depths;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
      depths.push_back(visitor.stack_depths[i]);
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
  if (err != OK) {
    return err;
  }
  *info_cnt = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*info_ptr)[i] = {
      soa.AddLocalReference<jobject>(mons[i].Read()),
      static_cast<jint>(depths[i])
    };
  }
  return err;
}

jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                          jthread thread,
                                          jint* owned_monitor_count_ptr,
                                          jobject** owned_monitors_ptr) {
  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jobject) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
  if (err != OK) {
    return err;
  }
  *owned_monitor_count_ptr = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
  }
  return err;
}

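// Marks the interpreter shadow frame at 'depth' so a FramePop event fires when it unwinds. Per
// the JVMTI spec the target thread must be suspended (or be the current thread), and native
// frames are rejected as opaque.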
jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
  art::Thread* self = art::Thread::Current();
  art::Thread* target;
  do {
    ThreadUtil::SuspendCheck(self);
    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
    // Make sure we won't be suspended in the middle of holding the thread_suspend_count_lock_ by
    // a user-code suspension. We retry and do another SuspendCheck to clear this.
    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
      continue;
    }
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
    // have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      return err;
    }
    if (target != self) {
      // TODO This is part of the spec but we could easily avoid needing to do it. We would just
      // put all the logic into a sync-checkpoint.
      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
      if (target->GetUserCodeSuspendCount() == 0) {
        return ERR(THREAD_NOT_SUSPENDED);
      }
    }
    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we
    // are done (unless it's 'self' in which case we don't care since we aren't going to be
    // returning).
    // TODO We could implement this using a synchronous checkpoint and not bother with any of the
    // suspension stuff. The spec does specifically say to return THREAD_NOT_SUSPENDED though.
    // Find the requested stack frame.
    std::unique_ptr<art::Context> context(art::Context::Create());
    FindFrameAtDepthVisitor visitor(target, context.get(), depth);
    visitor.WalkStack();
    if (!visitor.FoundFrame()) {
      return ERR(NO_MORE_FRAMES);
    }
    art::ArtMethod* method = visitor.GetMethod();
    if (method->IsNative()) {
      return ERR(OPAQUE_FRAME);
    }
    // From here we are sure to succeed.
    bool needs_instrument = false;
    // Get/create a shadow frame.
    art::ShadowFrame* shadow_frame = visitor.GetOrCreateShadowFrame(&needs_instrument);
    {
      art::WriterMutexLock lk(self, tienv->event_info_mutex_);
      // Mark the shadow frame as needs_notify_pop_.
      shadow_frame->SetNotifyPop(true);
      tienv->notify_frames.insert(shadow_frame);
    }
    // Make sure we will go to the interpreter and use the shadow frames.
    if (needs_instrument) {
      art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
    }
    return OK;
  } while (true);
}

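// Pops the top Java frame of a suspended target thread by forcing both the final and the
// penultimate frame into the interpreter: the final shadow frame returns immediately without
// running exit events, and the calling frame re-executes its current (invoke) instruction.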
jvmtiError StackUtil::PopFrame(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread thread) {
  art::Thread* self = art::Thread::Current();
  art::Thread* target;
  do {
    ThreadUtil::SuspendCheck(self);
    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
    // Make sure we won't be suspended in the middle of holding the thread_suspend_count_lock_ by
    // a user-code suspension. We retry and do another SuspendCheck to clear this.
    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
      continue;
    }
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
    // have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      return err;
    }
    {
      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
      if (target == self || target->GetUserCodeSuspendCount() == 0) {
        // We cannot be the current thread for this function.
        return ERR(THREAD_NOT_SUSPENDED);
      }
    }
    JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target);
    constexpr art::StackVisitor::StackWalkKind kWalkKind =
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
    if (tls_data != nullptr &&
        tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
        tls_data->disable_pop_frame_depth == art::StackVisitor::ComputeNumFrames(target,
                                                                                 kWalkKind)) {
      LOG(WARNING) << "Disallowing frame pop due to in-progress class-load/prepare. Frame at depth "
                   << tls_data->disable_pop_frame_depth << " was marked as un-poppable by the "
                   << "jvmti plugin. See b/117615146 for more information.";
      return ERR(OPAQUE_FRAME);
    }
    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we
    // are done.
    std::unique_ptr<art::Context> context(art::Context::Create());
    FindFrameAtDepthVisitor final_frame(target, context.get(), 0);
    FindFrameAtDepthVisitor penultimate_frame(target, context.get(), 1);
    final_frame.WalkStack();
    penultimate_frame.WalkStack();

    if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
      // Cannot do it if there is only one frame!
      return ERR(NO_MORE_FRAMES);
    }

    art::ArtMethod* called_method = final_frame.GetMethod();
    art::ArtMethod* calling_method = penultimate_frame.GetMethod();
    if (calling_method->IsNative() || called_method->IsNative()) {
      return ERR(OPAQUE_FRAME);
    }
    // From here we are sure to succeed.

    // Get/create the shadow frames.
    bool created_final_frame = false;
    bool created_penultimate_frame = false;
    art::ShadowFrame* called_shadow_frame =
        final_frame.GetOrCreateShadowFrame(&created_final_frame);
    art::ShadowFrame* calling_shadow_frame =
        penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame);

    CHECK_NE(called_shadow_frame, calling_shadow_frame)
        << "Frames at different depths not different!";

    // Tell the shadow-frame to return immediately and skip all exit events.
    called_shadow_frame->SetForcePopFrame(true);
    calling_shadow_frame->SetForceRetryInstruction(true);

    // Make sure we will go to the interpreter and use the shadow frames. The early return for
    // the final frame will force everything to the interpreter so we only need to instrument if
    // it was not present.
    if (created_final_frame) {
      DeoptManager::Get()->DeoptimizeThread(target);
    }
    return OK;
  } while (true);
}

}  // namespace openjdkjvmti