/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "arch/context.h"
#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "deopt_manager.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "jni/jni_env_ext.h"
#include "jni/jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "ti_logging.h"
#include "ti_thread.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) noexcept = default;

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};
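
// Worked example (illustrative): with start = 1 and stop = 2 the visitor above
// skips the top-most non-runtime frame, reports the next two frames through
// 'fn', and then aborts the walk; stop == 0 means "report without limit".
// Runtime methods are never counted against either start or stop.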

art::ShadowFrame* FindFrameAtDepthVisitor::GetOrCreateShadowFrame(bool* created_frame) {
  art::ShadowFrame* cur = GetCurrentShadowFrame();
  if (cur == nullptr) {
    *created_frame = true;
    art::ArtMethod* method = GetMethod();
    const uint16_t num_regs = method->DexInstructionData().RegistersSize();
    cur = GetThread()->FindOrCreateDebuggerShadowFrame(GetFrameId(),
                                                       num_regs,
                                                       method,
                                                       GetDexPc());
    DCHECK(cur != nullptr);
  } else {
    *created_frame = false;
  }
  return cur;
}

template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}
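
// Worked example (illustrative): with 5 collected frames, start_depth = -2 and
// max_frame_count = 10, the "frames from the bottom" branch copies
// frames.data()[3] and frames.data()[4] (the two bottom-most frames) into
// frame_buffer and sets *count_ptr to 2.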

struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};
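
// Note: unlike GetStackTraceVectorClosure above, this closure writes straight
// into the caller-provided frame_buffer while the target thread sits in the
// checkpoint, avoiding the intermediate std::vector copy. It is only usable
// for non-negative start depths, where the number of frames to skip is known
// up front.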

jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index == 0) {
      JVMTI_LOG(INFO, jvmti_env) << "The stack is not large enough for a start_depth of "
                                 << start_depth << ".";
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  } else {
    GetStackTraceVectorClosure closure(0, 0);
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }

    return TranslateFrameVector(closure.frames,
                                start_depth,
                                closure.start_result,
                                max_frame_count,
                                frame_buffer,
                                count_ptr);
  }
}

template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) override
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  // Note: requires the mutator lock as the checkpoint requires the mutator lock.
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}
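
// RunCheckpoint returns the number of threads that will (asynchronously) run
// the closure; each of them calls barrier.Pass() once its stack walk is done,
// so the Increment() above blocks the caller until every stack has been
// collected.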

jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  art::Thread* current = art::Thread::Current();
  {
    art::ScopedObjectAccess soa(current);
    RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
  }

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
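
  // Resulting chunk layout (a single allocation, as the spec requires):
  //
  //   [jvmtiStackInfo x data.frames.size()][padding][jvmtiFrameInfo x sum_frames]
  //
  // where the padding rounds the stack-info block up to alignof(jvmtiFrameInfo).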
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}

jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);  // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer. Note that the output is sized by
  // thread_count, which may be larger than the number of threads we collected frames for.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    // search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
                                        JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    // This is not StackVisitor::ComputeNumFrames, as runtime methods and transitions must not be
    // counted.
    art::StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
          art::ArtMethod* m = stack_visitor->GetMethod();
          if (m != nullptr && !m->IsRuntimeMethod()) {
            count++;
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }

  DCHECK(thread != nullptr);
  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  *count_ptr = closure.count;
  return ERR(NONE);
}

struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    // Walks up the stack 'n' callers.
    size_t count = 0u;
    art::StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
          art::ArtMethod* m = stack_visitor->GetMethod();
          if (m != nullptr && !m->IsRuntimeMethod()) {
            DCHECK(method == nullptr);
            if (count == n) {
              method = m;
              dex_pc = stack_visitor->GetDexPc(/*abort_on_failure=*/false);
              return false;
            }
            count++;
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (depth < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::dex::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}

struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
  // We need a context because VisitLocks needs it to retrieve the monitor objects.
  explicit MonitorVisitor(art::Thread* thread)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      : art::StackVisitor(thread,
                          art::Context::Create(),
                          art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        hs(art::Thread::Current()),
        current_stack_depth(0) {}

  ~MonitorVisitor() {
    delete context_;
  }

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    if (!GetMethod()->IsRuntimeMethod()) {
      art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
      ++current_stack_depth;
    }
    return true;
  }

  static void AppendOwnedMonitors(art::ObjPtr<art::mirror::Object> owned_monitor, void* arg)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
    // Filter out duplicates.
    for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
      if (monitor.Get() == owned_monitor) {
        return;
      }
    }
    visitor->monitors.push_back(visitor->hs.NewHandle(owned_monitor));
    visitor->stack_depths.push_back(visitor->current_stack_depth);
  }

  void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (const art::Handle<art::mirror::Object>& m : monitors) {
      if (m.Get() == obj) {
        return;
      }
    }
    monitors.push_back(hs.NewHandle(obj));
    stack_depths.push_back(-1);
  }

  art::VariableSizedHandleScope hs;
  jint current_stack_depth;
  std::vector<art::Handle<art::mirror::Object>> monitors;
  std::vector<jint> stack_depths;
};
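
// Monitors found via VisitRoot (e.g. ones acquired through JNI MonitorEnter
// rather than in a Java frame) are recorded with a stack depth of -1, which is
// what GetOwnedMonitorStackDepthInfo reports for such monitors per the JVMTI
// spec.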

template<typename Fn>
struct MonitorInfoClosure : public art::Closure {
 public:
  explicit MonitorInfoClosure(Fn handle_results)
      : err_(OK), handle_results_(handle_results) {}

  void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    // Find the monitors on the stack.
    MonitorVisitor visitor(target);
    visitor.WalkStack(/* include_transitions= */ false);
    // Find any other monitors, including ones acquired in native code.
    art::RootInfo root_info(art::kRootVMInternal);
    target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
    err_ = handle_results_(visitor);
  }

  jvmtiError GetError() {
    return err_;
  }

 private:
  jvmtiError err_;
  Fn handle_results_;
};


template <typename Fn>
static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
                                            jthread thread,
                                            Fn handle_results)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  art::Thread* self = art::Thread::Current();
  MonitorInfoClosure<Fn> closure(handle_results);
  bool called_method = false;
  {
    art::Locks::thread_list_lock_->ExclusiveLock(self);
    art::Thread* target = nullptr;
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
      return err;
    }
    if (target != self) {
      called_method = true;
      // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
      // Since this deals with object references we need to avoid going to sleep.
      art::ScopedAssertNoThreadSuspension sants("Getting owned monitor usage");
      if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
        return ERR(THREAD_NOT_ALIVE);
      }
    } else {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    }
  }
  // We cannot run the closure on the current thread while holding the thread_list_lock, since the
  // closure can call into the verifier, which may suspend the current thread for GC; suspending
  // while holding the ThreadListLock would be bad. Other threads run the closure on a checkpoint,
  // which is fine, but for the current thread we must drop the mutex first.
  if (!called_method) {
    closure.Run(self);
  }
  return closure.GetError();
}

jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
                                                    jthread thread,
                                                    jint* info_cnt,
                                                    jvmtiMonitorStackDepthInfo** info_ptr) {
  if (info_cnt == nullptr || info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  std::vector<uint32_t> depths;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
      depths.push_back(visitor.stack_depths[i]);
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
  if (err != OK) {
    return err;
  }
  *info_cnt = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*info_ptr)[i] = {
      soa.AddLocalReference<jobject>(mons[i].Read()),
      static_cast<jint>(depths[i])
    };
  }
  return err;
}

jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                          jthread thread,
                                          jint* owned_monitor_count_ptr,
                                          jobject** owned_monitors_ptr) {
  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jobject) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
  if (err != OK) {
    return err;
  }
  *owned_monitor_count_ptr = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
  }
  return err;
}

jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
  art::Thread* self = art::Thread::Current();
  art::Thread* target;

  ScopedNoUserCodeSuspension snucs(self);
  // From now on we know we cannot get suspended by user-code.
  // NB This does a SuspendCheck (during thread state change) so we need to make
  // sure we don't have the 'suspend_lock' locked here.
  art::ScopedObjectAccess soa(self);
  art::Locks::thread_list_lock_->ExclusiveLock(self);
  jvmtiError err = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return err;
  }
  if (target != self) {
    // TODO This is part of the spec but we could easily avoid needing to do it.
    // We would just put all the logic into a sync-checkpoint.
    art::Locks::thread_suspend_count_lock_->ExclusiveLock(self);
    if (target->GetUserCodeSuspendCount() == 0) {
      art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
      return ERR(THREAD_NOT_SUSPENDED);
    }
    art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
  }
  // We hold the user_code_suspension_lock_ so the target thread is staying
  // suspended until we are done (unless it's 'self' in which case we don't care
  // since we aren't going to be returning).
  // TODO We could implement this using a synchronous checkpoint and not bother
  // with any of the suspension stuff. The spec does specifically say to return
  // THREAD_NOT_SUSPENDED though.
  // Find the requested stack frame.
  std::unique_ptr<art::Context> context(art::Context::Create());
  FindFrameAtDepthVisitor visitor(target, context.get(), depth);
  visitor.WalkStack();
  if (!visitor.FoundFrame()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(NO_MORE_FRAMES);
  }
  art::ArtMethod* method = visitor.GetMethod();
  if (method->IsNative()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(OPAQUE_FRAME);
  }
  // From here we are sure to succeed.
  bool needs_instrument = false;
  // Get/create a shadow frame.
  art::ShadowFrame* shadow_frame = visitor.GetOrCreateShadowFrame(&needs_instrument);
  {
    art::WriterMutexLock lk(self, tienv->event_info_mutex_);
    if (LIKELY(!shadow_frame->NeedsNotifyPop())) {
      // Ensure we won't miss exceptions being thrown if we get jit-compiled. We
      // only do this for the first NotifyPopFrame.
      target->IncrementForceInterpreterCount();

      // Mark shadow frame as needs_notify_pop_.
      shadow_frame->SetNotifyPop(true);
    }
    tienv->notify_frames.insert(shadow_frame);
  }
  // Make sure we will go to the interpreter and use the shadow frames.
  if (needs_instrument) {
    art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
      DeoptManager::Get()->DeoptimizeThread(self);
    });
    target->RequestSynchronousCheckpoint(&fc);
  } else {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
  }
  return OK;
}
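
// Typical agent flow (illustrative only): an agent holding the
// can_generate_frame_pop_events capability calls NotifyFramePop(thread, 0)
// from, say, a Breakpoint callback on a suspended thread, then receives a
// FramePop event when the marked frame returns or unwinds with an exception.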

jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
  art::Thread* self = art::Thread::Current();
  art::Thread* target;

  ScopedNoUserCodeSuspension snucs(self);
  // From now on we know we cannot get suspended by user-code.
  // NB This does a SuspendCheck (during thread state change) so we need to make
  // sure we don't have the 'suspend_lock' locked here.
  art::ScopedObjectAccess soa(self);
  art::Locks::thread_list_lock_->ExclusiveLock(self);
  jvmtiError err = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return err;
  }
  {
    art::Locks::thread_suspend_count_lock_->ExclusiveLock(self);
    if (target == self || target->GetUserCodeSuspendCount() == 0) {
      // We cannot be the current thread for this function.
      art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
      return ERR(THREAD_NOT_SUSPENDED);
    }
    art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
  }
  JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target);
  constexpr art::StackVisitor::StackWalkKind kWalkKind =
      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
  if (tls_data != nullptr &&
      tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
      tls_data->disable_pop_frame_depth ==
          art::StackVisitor::ComputeNumFrames(target, kWalkKind)) {
    JVMTI_LOG(WARNING, env) << "Disallowing frame pop due to in-progress class-load/prepare. "
                            << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
                            << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
                            << "more information.";
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(OPAQUE_FRAME);
  }
  // We hold the user_code_suspension_lock_ so the target thread is staying
  // suspended until we are done.
  std::unique_ptr<art::Context> context(art::Context::Create());
  FindFrameAtDepthVisitor final_frame(target, context.get(), 0);
  FindFrameAtDepthVisitor penultimate_frame(target, context.get(), 1);
  final_frame.WalkStack();
  penultimate_frame.WalkStack();

  if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
    // Cannot do it if there is only one frame!
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(NO_MORE_FRAMES);
  }

  art::ArtMethod* called_method = final_frame.GetMethod();
  art::ArtMethod* calling_method = penultimate_frame.GetMethod();
  if (calling_method->IsNative() || called_method->IsNative()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(OPAQUE_FRAME);
  }
  // From here we are sure to succeed.

  // Get/create a shadow frame.
  bool created_final_frame = false;
  bool created_penultimate_frame = false;
  art::ShadowFrame* called_shadow_frame =
      final_frame.GetOrCreateShadowFrame(&created_final_frame);
  art::ShadowFrame* calling_shadow_frame =
      penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame);

  CHECK_NE(called_shadow_frame, calling_shadow_frame)
      << "Frames at different depths not different!";

  // Tell the shadow-frame to return immediately and skip all exit events.
  called_shadow_frame->SetForcePopFrame(true);
  calling_shadow_frame->SetForceRetryInstruction(true);

  // Make sure we will go to the interpreter and use the shadow frames. The
  // early return for the final frame will force everything to the interpreter
  // so we only need to instrument if it was not present.
  if (created_final_frame) {
    art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
      DeoptManager::Get()->DeoptimizeThread(self);
    });
    target->RequestSynchronousCheckpoint(&fc);
  } else {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
  }
  return OK;
}

}  // namespace openjdkjvmti