/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

32#include "ti_stack.h"
33
Andreas Gampeeba32fb2017-01-12 17:40:05 -080034#include <algorithm>
Alex Lightb7c640d2019-03-20 15:52:13 -070035#include <initializer_list>
Andreas Gampea1a27c62017-01-11 16:37:16 -080036#include <list>
37#include <unordered_map>
38#include <vector>
39
Alex Lightb7c640d2019-03-20 15:52:13 -070040#include "android-base/macros.h"
41#include "android-base/thread_annotations.h"
Andreas Gampee5d23982019-01-08 10:34:26 -080042#include "arch/context.h"
Andreas Gampea1d2f952017-04-20 22:53:58 -070043#include "art_field-inl.h"
Alex Lighte814f9d2017-07-31 16:14:39 -070044#include "art_method-inl.h"
Andreas Gampea1d2f952017-04-20 22:53:58 -070045#include "art_jvmti.h"
Steven Morelande431e272017-07-18 16:53:49 -070046#include "art_method-inl.h"
Andreas Gampe6237cd32017-06-22 22:17:38 -070047#include "barrier.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080048#include "base/bit_utils.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070049#include "base/enums.h"
Alex Lightb7c640d2019-03-20 15:52:13 -070050#include "base/locks.h"
51#include "base/macros.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080052#include "base/mutex.h"
Alex Lighta4cdd362019-04-18 09:17:10 -070053#include "deopt_manager.h"
David Sehr9e734c72018-01-04 17:56:19 -080054#include "dex/code_item_accessors-inl.h"
55#include "dex/dex_file.h"
56#include "dex/dex_file_annotations.h"
57#include "dex/dex_file_types.h"
Alex Lightb7c640d2019-03-20 15:52:13 -070058#include "dex/dex_instruction-inl.h"
59#include "dex/primitive.h"
60#include "events.h"
Alex Light88e1ddd2017-08-21 13:09:55 -070061#include "gc_root.h"
Andreas Gampeeba32fb2017-01-12 17:40:05 -080062#include "handle_scope-inl.h"
Alex Lightb7c640d2019-03-20 15:52:13 -070063#include "instrumentation.h"
64#include "interpreter/shadow_frame-inl.h"
65#include "interpreter/shadow_frame.h"
Vladimir Markoa3ad0cd2018-05-04 10:06:38 +010066#include "jni/jni_env_ext.h"
67#include "jni/jni_internal.h"
Alex Lightb7c640d2019-03-20 15:52:13 -070068#include "jvalue-inl.h"
69#include "jvalue.h"
70#include "jvmti.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070071#include "mirror/class.h"
72#include "mirror/dex_cache.h"
Andreas Gampe373a9b52017-10-18 09:01:57 -070073#include "nativehelper/scoped_local_ref.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070074#include "scoped_thread_state_change-inl.h"
Alex Lightb7c640d2019-03-20 15:52:13 -070075#include "scoped_thread_state_change.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070076#include "stack.h"
Alex Lightb7c640d2019-03-20 15:52:13 -070077#include "thread.h"
78#include "thread_state.h"
Alex Lightae45cbb2018-10-18 15:49:56 -070079#include "ti_logging.h"
Alex Lighte814f9d2017-07-31 16:14:39 -070080#include "ti_thread.h"
Andreas Gampeb486a982017-06-01 13:45:54 -070081#include "thread-current-inl.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080082#include "thread_list.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070083#include "thread_pool.h"
Alex Light88e1ddd2017-08-21 13:09:55 -070084#include "ti_thread.h"
Andreas Gampea1d2f952017-04-20 22:53:58 -070085#include "well_known_classes.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070086
namespace openjdkjvmti {

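// Visits the Java frames of a thread and reports a jvmtiFrameInfo for each one to the supplied
// callback. 'start' frames are skipped first; after that at most 'stop' frames are reported
// (stop == 0 means no limit). Runtime methods (stubs and transitions) are never reported.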
template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) noexcept = default;

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

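// Returns the shadow frame for the frame this visitor currently stands at, creating a
// debugger-managed shadow frame (and reporting that through *created_frame) when the method is
// executing as compiled code and therefore does not have one yet.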
art::ShadowFrame* FindFrameAtDepthVisitor::GetOrCreateShadowFrame(bool* created_frame) {
  art::ShadowFrame* cur = GetCurrentShadowFrame();
  if (cur == nullptr) {
    *created_frame = true;
    art::ArtMethod* method = GetMethod();
    const uint16_t num_regs = method->DexInstructionData().RegistersSize();
    cur = GetThread()->FindOrCreateDebuggerShadowFrame(GetFrameId(),
                                                       num_regs,
                                                       method,
                                                       GetDexPc());
    DCHECK(cur != nullptr);
  } else {
    *created_frame = false;
  }
  return cur;
}

template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

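// Closure run on the target thread (via a checkpoint) that collects the requested frames into a
// growable vector. The leftover 'start'/'stop' counters are exported afterwards so the caller can
// tell whether the requested start depth was actually reached.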
struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

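// Copies collected frames into the caller-provided frame_buffer, honoring the JVMTI semantics of
// start_depth: non-negative values count from the top of the stack, negative values request the
// bottom -start_depth frames.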
static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

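// Variant of GetStackTraceVectorClosure that writes directly into the caller's frame_buffer,
// avoiding the intermediate vector. Only usable for non-negative start depths, where the frames
// can be filled in front-to-back.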
struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

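// Implements the JVMTI GetStackTrace call. The stack walk itself runs on the target thread via
// RequestSynchronousCheckpoint; the thread_list_lock_ is held (and released by the checkpoint)
// to keep the target thread from exiting while it is being inspected.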
jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index == 0) {
      JVMTI_LOG(INFO, jvmti_env) << "The stack is not large enough for a start_depth of "
                                 << start_depth << ".";
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  } else {
    GetStackTraceVectorClosure closure(0, 0);
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }

    return TranslateFrameVector(closure.frames,
                                start_depth,
                                closure.start_result,
                                max_frame_count,
                                frame_buffer,
                                count_ptr);
  }
}

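// Closure used by GetAllStackTraces and GetThreadListStackTraces. It is run on every thread by a
// thread-list checkpoint; each thread records its own frames into storage handed out by the
// templated Data object and then passes the barrier so the requester can wait for completion.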
template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) override
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

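// Runs the closure above as a checkpoint on all threads and blocks the calling thread until every
// checkpoint has passed the barrier.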
template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  // Note: requires the mutator lock as the checkpoint requires the mutator lock.
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}

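// Implements JVMTI GetAllStackTraces: collect frames for every live thread via a checkpoint, then
// repackage everything into the single jvmtiStackInfo allocation the spec requires.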
jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  art::Thread* current = art::Thread::Current();
  {
    art::ScopedObjectAccess soa(current);
    RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
  }

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}

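// Implements JVMTI GetThreadListStackTraces: like GetAllStackTraces, but only threads whose peers
// match the given thread_list are collected; threads with no native counterpart (not yet started
// or already terminated) get an empty entry with the appropriate state bits.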
jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);  // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer. Note that this is not frames.size(),
  // potentially.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    // search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
                                        JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

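// Counts the Java frames of the target thread, run on that thread via a checkpoint. Runtime
// methods are excluded, so the result matches what GetStackTrace would report.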
struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    // This is not StackVisitor::ComputeNumFrames, as runtime methods and transitions must not be
    // counted.
    art::StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
          art::ArtMethod* m = stack_visitor->GetMethod();
          if (m != nullptr && !m->IsRuntimeMethod()) {
            count++;
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }

  DCHECK(thread != nullptr);
  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  *count_ptr = closure.count;
  return ERR(NONE);
}

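// Finds the method and dex pc at stack depth 'n' (counting only non-runtime methods), run on the
// target thread via a checkpoint.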
struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    // Walks up the stack 'n' callers.
    size_t count = 0u;
    art::StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
          art::ArtMethod* m = stack_visitor->GetMethod();
          if (m != nullptr && !m->IsRuntimeMethod()) {
            DCHECK(method == nullptr);
            if (count == n) {
              method = m;
              dex_pc = stack_visitor->GetDexPc(/*abort_on_failure=*/false);
              return false;
            }
            count++;
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (depth < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::dex::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}

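// Walks a thread's stack and JNI monitor roots to collect every monitor the thread owns,
// recording the stack depth at which each monitor was acquired (-1 for monitors only reachable
// through JNI/native code).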
struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
  // We need a context because VisitLocks needs it to retrieve the monitor objects.
  explicit MonitorVisitor(art::Thread* thread)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      : art::StackVisitor(thread,
                          art::Context::Create(),
                          art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        hs(art::Thread::Current()),
        current_stack_depth(0) {}

  ~MonitorVisitor() {
    delete context_;
  }

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    if (!GetMethod()->IsRuntimeMethod()) {
      art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
      ++current_stack_depth;
    }
    return true;
  }

  static void AppendOwnedMonitors(art::ObjPtr<art::mirror::Object> owned_monitor, void* arg)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
    // Filter out duplicates.
    for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
      if (monitor.Get() == owned_monitor) {
        return;
      }
    }
    visitor->monitors.push_back(visitor->hs.NewHandle(owned_monitor));
    visitor->stack_depths.push_back(visitor->current_stack_depth);
  }

  void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (const art::Handle<art::mirror::Object>& m : monitors) {
      if (m.Get() == obj) {
        return;
      }
    }
    monitors.push_back(hs.NewHandle(obj));
    stack_depths.push_back(-1);
  }

  art::VariableSizedHandleScope hs;
  jint current_stack_depth;
  std::vector<art::Handle<art::mirror::Object>> monitors;
  std::vector<jint> stack_depths;
};

template<typename Fn>
struct MonitorInfoClosure : public art::Closure {
 public:
  explicit MonitorInfoClosure(Fn handle_results)
      : err_(OK), handle_results_(handle_results) {}

  void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    // Find the monitors on the stack.
    MonitorVisitor visitor(target);
    visitor.WalkStack(/* include_transitions= */ false);
    // Find any other monitors, including ones acquired in native code.
    art::RootInfo root_info(art::kRootVMInternal);
    target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
    err_ = handle_results_(visitor);
  }

  jvmtiError GetError() {
    return err_;
  }

 private:
  jvmtiError err_;
  Fn handle_results_;
};

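// Shared implementation for GetOwnedMonitorInfo and GetOwnedMonitorStackDepthInfo. The monitor
// collection runs on the target thread via a checkpoint (or directly when the target is the
// current thread), and the per-call handle_results functor converts the visitor's findings.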
template <typename Fn>
static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
                                            jthread thread,
                                            Fn handle_results)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  art::Thread* self = art::Thread::Current();
  MonitorInfoClosure<Fn> closure(handle_results);
  bool called_method = false;
  {
    art::Locks::thread_list_lock_->ExclusiveLock(self);
    art::Thread* target = nullptr;
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
      return err;
    }
    if (target != self) {
      called_method = true;
      // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
      // Since this deals with object references we need to avoid going to sleep.
      art::ScopedAssertNoThreadSuspension sants("Getting owned monitor usage");
      if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
        return ERR(THREAD_NOT_ALIVE);
      }
    } else {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    }
  }
  // Cannot call the closure on the current thread if we have thread_list_lock since we need to call
  // into the verifier which can cause the current thread to suspend for gc. Suspending would be a
  // bad thing to do if we hold the ThreadListLock. For other threads since we are running it on a
  // checkpoint we are fine but if the thread is the current one we need to drop the mutex first.
  if (!called_method) {
    closure.Run(self);
  }
  return closure.GetError();
}

jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
                                                    jthread thread,
                                                    jint* info_cnt,
                                                    jvmtiMonitorStackDepthInfo** info_ptr) {
  if (info_cnt == nullptr || info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  std::vector<uint32_t> depths;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
      depths.push_back(visitor.stack_depths[i]);
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
  if (err != OK) {
    return err;
  }
  *info_cnt = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*info_ptr)[i] = {
      soa.AddLocalReference<jobject>(mons[i].Read()),
      static_cast<jint>(depths[i])
    };
  }
  return err;
}

jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                          jthread thread,
                                          jint* owned_monitor_count_ptr,
                                          jobject** owned_monitors_ptr) {
  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jobject) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
  if (err != OK) {
    return err;
  }
  *owned_monitor_count_ptr = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
  }
  return err;
}

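// Implements JVMTI NotifyFramePop: marks the shadow frame at the requested depth so a FramePop
// event is reported when it is popped, forcing the target onto the interpreter (via
// deoptimization) if the frame is currently running compiled code.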
jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
  art::Thread* self = art::Thread::Current();
  art::Thread* target;

  ScopedNoUserCodeSuspension snucs(self);
  // From now on we know we cannot get suspended by user-code.
  // NB This does a SuspendCheck (during thread state change) so we need to make
  // sure we don't have the 'suspend_lock' locked here.
  art::ScopedObjectAccess soa(self);
  art::Locks::thread_list_lock_->ExclusiveLock(self);
  jvmtiError err = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return err;
  }
  if (target != self) {
    // TODO This is part of the spec but we could easily avoid needing to do it.
    // We would just put all the logic into a sync-checkpoint.
    art::Locks::thread_suspend_count_lock_->ExclusiveLock(self);
    if (target->GetUserCodeSuspendCount() == 0) {
      art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
      return ERR(THREAD_NOT_SUSPENDED);
    }
    art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
  }
  // We hold the user_code_suspension_lock_ so the target thread is staying
  // suspended until we are done (unless it's 'self' in which case we don't care
  // since we aren't going to be returning).
  // TODO We could implement this using a synchronous checkpoint and not bother
  // with any of the suspension stuff. The spec does specifically say to return
  // THREAD_NOT_SUSPENDED though.
  // Find the requested stack frame.
  std::unique_ptr<art::Context> context(art::Context::Create());
  FindFrameAtDepthVisitor visitor(target, context.get(), depth);
  visitor.WalkStack();
  if (!visitor.FoundFrame()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(NO_MORE_FRAMES);
  }
  art::ArtMethod* method = visitor.GetMethod();
  if (method->IsNative()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(OPAQUE_FRAME);
  }
  // From here we are sure to succeed.
  bool needs_instrument = false;
  // Get/create a shadow frame.
  art::ShadowFrame* shadow_frame = visitor.GetOrCreateShadowFrame(&needs_instrument);
  {
    art::WriterMutexLock lk(self, tienv->event_info_mutex_);
    if (LIKELY(!shadow_frame->NeedsNotifyPop())) {
      // Ensure we won't miss exceptions being thrown if we get jit-compiled. We
      // only do this for the first NotifyPopFrame.
      target->IncrementForceInterpreterCount();

      // Mark the shadow frame as needs_notify_pop_.
      shadow_frame->SetNotifyPop(true);
    }
    tienv->notify_frames.insert(shadow_frame);
  }
  // Make sure we will go to the interpreter and use the shadow frames.
  if (needs_instrument) {
    art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
      DeoptManager::Get()->DeoptimizeThread(self);
    });
    target->RequestSynchronousCheckpoint(&fc);
  } else {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
  }
  return OK;
}

namespace {

enum class NonStandardExitType {
  kPopFrame,
  kForceReturn,
};

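// RAII helper shared by the PopFrame and ForceEarlyReturn paths. The constructor blocks user-code
// suspension, becomes runnable (shared mutator_lock_), takes the thread_list_lock_, locates the
// top two frames of the suspended target thread and materializes shadow frames for them; result_
// records any error found along the way.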
1114template<NonStandardExitType kExitType>
1115class NonStandardExitFrames {
1116 public:
1117 NonStandardExitFrames(art::Thread* self, jvmtiEnv* env, jthread thread)
1118 REQUIRES(!art::Locks::thread_suspend_count_lock_)
1119 ACQUIRE_SHARED(art::Locks::mutator_lock_)
1120 ACQUIRE(art::Locks::thread_list_lock_, art::Locks::user_code_suspension_lock_)
1121 : snucs_(self) {
1122 // We keep the user-code-suspend-count lock.
1123 art::Locks::user_code_suspension_lock_->AssertExclusiveHeld(self);
1124
1125 // From now on we know we cannot get suspended by user-code.
1126 // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
1127 // have the 'suspend_lock' locked here.
1128 old_state_ = self->TransitionFromSuspendedToRunnable();
1129 art::ScopedObjectAccessUnchecked soau(self);
1130
1131 art::Locks::thread_list_lock_->ExclusiveLock(self);
1132
1133 if (!ThreadUtil::GetAliveNativeThread(thread, soau, &target_, &result_)) {
1134 return;
1135 }
1136 {
1137 art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
1138 if (target_ != self && target_->GetUserCodeSuspendCount() == 0) {
1139 // We cannot be the current thread for this function.
1140 result_ = ERR(THREAD_NOT_SUSPENDED);
1141 return;
1142 }
1143 }
1144 JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target_);
1145 constexpr art::StackVisitor::StackWalkKind kWalkKind =
1146 art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
1147 if (tls_data != nullptr &&
1148 tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
1149 tls_data->disable_pop_frame_depth ==
1150 art::StackVisitor::ComputeNumFrames(target_, kWalkKind)) {
1151 JVMTI_LOG(WARNING, env) << "Disallowing frame pop due to in-progress class-load/prepare. "
1152 << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
1153 << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
1154 << "more information.";
1155 result_ = ERR(OPAQUE_FRAME);
1156 return;
1157 }
1158 // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
1159 // done.
1160 std::unique_ptr<art::Context> context(art::Context::Create());
1161 FindFrameAtDepthVisitor final_frame(target_, context.get(), 0);
1162 FindFrameAtDepthVisitor penultimate_frame(target_, context.get(), 1);
1163 final_frame.WalkStack();
1164 penultimate_frame.WalkStack();
1165
1166 if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
1167 // Cannot do it if there is only one frame!
1168 JVMTI_LOG(INFO, env) << "Can not pop final frame off of a stack";
      result_ = ERR(NO_MORE_FRAMES);
      return;
    }

    art::ArtMethod* called_method = final_frame.GetMethod();
    art::ArtMethod* calling_method = penultimate_frame.GetMethod();
    if (!CheckFunctions(env, calling_method, called_method)) {
      return;
    }
    DCHECK(!called_method->IsNative()) << called_method->PrettyMethod();

    // From here we are sure to succeed.
    result_ = OK;

    // Get/create a shadow frame
    final_frame_ = final_frame.GetOrCreateShadowFrame(&created_final_frame_);
    penultimate_frame_ =
        (calling_method->IsNative()
             ? nullptr
             : penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame_));

    final_frame_id_ = final_frame.GetFrameId();
    penultimate_frame_id_ = penultimate_frame.GetFrameId();

    CHECK_NE(final_frame_, penultimate_frame_) << "Frames at different depths not different!";
  }

  bool CheckFunctions(jvmtiEnv* env, art::ArtMethod* calling, art::ArtMethod* called)
      REQUIRES(art::Locks::thread_list_lock_, art::Locks::user_code_suspension_lock_)
      REQUIRES_SHARED(art::Locks::mutator_lock_);

  ~NonStandardExitFrames() RELEASE_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!art::Locks::thread_list_lock_)
      RELEASE(art::Locks::user_code_suspension_lock_) {
    art::Thread* self = art::Thread::Current();
    DCHECK_EQ(old_state_, art::ThreadState::kNative)
        << "Unexpected thread state on entering non-standard exit!";
    self->TransitionFromRunnableToSuspended(old_state_);
  }

  ScopedNoUserCodeSuspension snucs_;
  art::ShadowFrame* final_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = nullptr;
  art::ShadowFrame* penultimate_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = nullptr;
  bool created_final_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = false;
  bool created_penultimate_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = false;
  uint32_t final_frame_id_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = -1;
  uint32_t penultimate_frame_id_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = -1;
  art::Thread* target_ GUARDED_BY(art::Locks::thread_list_lock_) = nullptr;
  art::ThreadState old_state_ = art::ThreadState::kTerminated;
  jvmtiError result_ = ERR(INTERNAL);
};

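// ForceEarlyReturn only requires the method being exited to be non-native; the caller's frame is
// left untouched.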
template <>
bool NonStandardExitFrames<NonStandardExitType::kForceReturn>::CheckFunctions(
    jvmtiEnv* env, art::ArtMethod* calling ATTRIBUTE_UNUSED, art::ArtMethod* called) {
  if (UNLIKELY(called->IsNative())) {
    result_ = ERR(OPAQUE_FRAME);
    JVMTI_LOG(INFO, env) << "Cannot force early return from " << called->PrettyMethod()
                         << " because it is native.";
    return false;
  } else {
    return true;
  }
}

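// PopFrame requires both the method being popped and its caller to be non-native, since the
// caller's invoke instruction has to be re-executed by the interpreter.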
template <>
bool NonStandardExitFrames<NonStandardExitType::kPopFrame>::CheckFunctions(
    jvmtiEnv* env, art::ArtMethod* calling, art::ArtMethod* called) {
  if (UNLIKELY(calling->IsNative() || called->IsNative())) {
    result_ = ERR(OPAQUE_FRAME);
    JVMTI_LOG(INFO, env) << "Cannot pop the frame of " << called->PrettyMethod()
                         << " returning to " << calling->PrettyMethod()
                         << " because at least one of them is native.";
    return false;
  } else {
    return true;
  }
}

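// RAII helper that enables the internal event used to deliver the forced return value to the
// target thread. If NotifyFailure() is called before destruction the event is switched off again,
// undoing the setup.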
class SetupMethodExitEvents {
 public:
  SetupMethodExitEvents(art::Thread* self,
                        EventHandler* event_handler,
                        jthread target) REQUIRES(!art::Locks::mutator_lock_,
                                                 !art::Locks::user_code_suspension_lock_,
                                                 !art::Locks::thread_list_lock_)
      : self_(self), event_handler_(event_handler), target_(target) {
    DCHECK(target != nullptr);
    art::Locks::mutator_lock_->AssertNotHeld(self_);
    art::Locks::user_code_suspension_lock_->AssertNotHeld(self_);
    art::Locks::thread_list_lock_->AssertNotHeld(self_);
    event_handler_->SetInternalEvent(
        target_, ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_ENABLE);
  }

  ~SetupMethodExitEvents() REQUIRES(!art::Locks::mutator_lock_,
                                    !art::Locks::user_code_suspension_lock_,
                                    !art::Locks::thread_list_lock_) {
    art::Locks::mutator_lock_->AssertNotHeld(self_);
    art::Locks::user_code_suspension_lock_->AssertNotHeld(self_);
    art::Locks::thread_list_lock_->AssertNotHeld(self_);
    if (failed_) {
      event_handler_->SetInternalEvent(
          target_, ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_DISABLE);
    }
  }

  void NotifyFailure() {
    failed_ = true;
  }

 private:
  art::Thread* self_;
  EventHandler* event_handler_;
  jthread target_;
  bool failed_ = false;
};

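// Records the value that the forced return should produce. The value is queued with the event
// handler as a delayed non-standard exit event recorded against the given shadow frame and is
// consumed when that frame actually unwinds. The generic overload handles primitive values; the
// specializations below handle void (nullptr_t) and reference returns.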
template <typename T>
void AddDelayedMethodExitEvent(EventHandler* handler, art::ShadowFrame* frame, T value)
    REQUIRES_SHARED(art::Locks::mutator_lock_)
    REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);

template <typename T>
void AddDelayedMethodExitEvent(EventHandler* handler, art::ShadowFrame* frame, T value) {
  art::JValue val = art::JValue::FromPrimitive(value);
  jvalue jval{ .j = val.GetJ() };
  handler->AddDelayedNonStandardExitEvent(frame, false, jval);
}

template <>
void AddDelayedMethodExitEvent<std::nullptr_t>(EventHandler* handler,
                                               art::ShadowFrame* frame,
                                               std::nullptr_t null_val ATTRIBUTE_UNUSED) {
  jvalue jval;
  memset(&jval, 0, sizeof(jval));
  handler->AddDelayedNonStandardExitEvent(frame, false, jval);
}

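// Reference return values are pinned with a new global reference so the object stays live until
// the delayed event is delivered; the 'true' argument tells the handler the value is a reference.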
template <>
void AddDelayedMethodExitEvent<jobject>(EventHandler* handler,
                                        art::ShadowFrame* frame,
                                        jobject obj) {
  jvalue jval{ .l = art::Thread::Current()->GetJniEnv()->NewGlobalRef(obj) };
  handler->AddDelayedNonStandardExitEvent(frame, true, jval);
}

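// Checks that the value an agent wants a method to return is compatible with the method's
// declared return type; an incompatible value is reported as TYPE_MISMATCH by ForceEarlyReturn.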
template <typename T>
bool ValidReturnType(art::Thread* self, art::ObjPtr<art::mirror::Class> return_type, T value)
    REQUIRES_SHARED(art::Locks::mutator_lock_)
    REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);

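// Generates the ValidReturnType specializations for primitive values: the forced value is
// acceptable if the method's primitive return type is one of the listed kinds (e.g. a jint may
// be returned from int, char, boolean, short or byte methods).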
#define SIMPLE_VALID_RETURN_TYPE(type, ...)                                                        \
  template <>                                                                                      \
  bool ValidReturnType<type>(art::Thread * self ATTRIBUTE_UNUSED,                                  \
                             art::ObjPtr<art::mirror::Class> return_type,                          \
                             type value ATTRIBUTE_UNUSED) {                                        \
    static constexpr std::initializer_list<art::Primitive::Type> types{ __VA_ARGS__ };             \
    return std::find(types.begin(), types.end(), return_type->GetPrimitiveType()) != types.end();  \
  }

SIMPLE_VALID_RETURN_TYPE(jlong, art::Primitive::kPrimLong);
SIMPLE_VALID_RETURN_TYPE(jfloat, art::Primitive::kPrimFloat);
SIMPLE_VALID_RETURN_TYPE(jdouble, art::Primitive::kPrimDouble);
SIMPLE_VALID_RETURN_TYPE(nullptr_t, art::Primitive::kPrimVoid);
SIMPLE_VALID_RETURN_TYPE(jint,
                         art::Primitive::kPrimInt,
                         art::Primitive::kPrimChar,
                         art::Primitive::kPrimBoolean,
                         art::Primitive::kPrimShort,
                         art::Primitive::kPrimByte);
#undef SIMPLE_VALID_RETURN_TYPE

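// Reference values are valid for any non-primitive return type: null may be returned from any
// such method, otherwise the object's class must be assignable to the declared return type.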
template <>
bool ValidReturnType<jobject>(art::Thread* self,
                              art::ObjPtr<art::mirror::Class> return_type,
                              jobject return_value) {
  if (return_type->IsPrimitive()) {
    return false;
  }
  if (return_value == nullptr) {
    // Null can be used for anything.
    return true;
  }
  return return_type->IsAssignableFrom(self->DecodeJObject(return_value)->GetClass());
}

}  // namespace

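// Implements the JVMTI PopFrame call: discard the top frame of the target thread (which must be
// suspended unless it is the current thread) and re-execute the invoke instruction in the caller
// once the thread resumes. A typical agent-side sequence might look like the following sketch
// (error handling omitted; 'jvmti' is assumed to be a valid jvmtiEnv*):
//
//   jvmti->SuspendThread(thread);
//   jvmti->PopFrame(thread);      // takes effect when the thread is resumed
//   jvmti->ResumeThread(thread);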
jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
  art::Thread* self = art::Thread::Current();
  NonStandardExitFrames<NonStandardExitType::kPopFrame> frames(self, env, thread);
  if (frames.result_ != OK) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return frames.result_;
  }
  // Tell the shadow-frame to return immediately and skip all exit events.
  frames.penultimate_frame_->SetForceRetryInstruction(true);
  frames.final_frame_->SetForcePopFrame(true);
  frames.final_frame_->SetSkipMethodExitEvents(true);
  if (frames.created_final_frame_ || frames.created_penultimate_frame_) {
    art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
      DeoptManager::Get()->DeoptimizeThread(self);
    });
    frames.target_->RequestSynchronousCheckpoint(&fc);
  } else {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
  }
  return OK;
}

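// Implements the JVMTI ForceEarlyReturn family: make the top frame of the target thread (or the
// current thread when 'thread' is null) return immediately with the given value, delivering the
// matching MethodExit event with that value. A typical agent-side sequence might look like the
// following sketch (error handling omitted; 'jvmti' is assumed to be a valid jvmtiEnv*):
//
//   jvmti->SuspendThread(thread);
//   jvmti->ForceEarlyReturnInt(thread, 42);  // takes effect when the thread is resumed
//   jvmti->ResumeThread(thread);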
template <typename T>
jvmtiError
StackUtil::ForceEarlyReturn(jvmtiEnv* env, EventHandler* event_handler, jthread thread, T value) {
  art::Thread* self = art::Thread::Current();
  // We don't want to use the usual null == current-thread idiom here, since for the events we use
  // internally to implement force-early-return null instead means all threads. Just get the
  // current jthread explicitly if needed.
  ScopedLocalRef<jthread> cur_thread(self->GetJniEnv(), nullptr);
  if (UNLIKELY(thread == nullptr)) {
    art::ScopedObjectAccess soa(self);
    cur_thread.reset(soa.AddLocalReference<jthread>(self->GetPeer()));
    thread = cur_thread.get();
  }
  // Set up the method-exit events used to implement early return before taking any locks; thanks
  // to destructor ordering they will be torn down again if anything goes wrong.
  SetupMethodExitEvents smee(self, event_handler, thread);
  NonStandardExitFrames<NonStandardExitType::kForceReturn> frames(self, env, thread);
  if (frames.result_ != OK) {
    smee.NotifyFailure();
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return frames.result_;
  } else if (!ValidReturnType<T>(
                 self, frames.final_frame_->GetMethod()->ResolveReturnType(), value)) {
    smee.NotifyFailure();
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(TYPE_MISMATCH);
  } else if (frames.final_frame_->GetForcePopFrame()) {
    // TODO We should really support this.
    smee.NotifyFailure();
    std::string thread_name;
    frames.target_->GetThreadName(thread_name);
    JVMTI_LOG(WARNING, env) << "PopFrame or force-return already pending on thread " << thread_name;
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    return ERR(OPAQUE_FRAME);
  }
  // Tell the shadow-frame to return immediately; the delayed method-exit event added below
  // supplies the forced return value when the frame unwinds.
  frames.final_frame_->SetForcePopFrame(true);
  AddDelayedMethodExitEvent<T>(event_handler, frames.final_frame_, value);
  if (frames.created_final_frame_ || frames.created_penultimate_frame_) {
    art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
      DeoptManager::Get()->DeoptimizeThread(self);
    });
    frames.target_->RequestSynchronousCheckpoint(&fc);
  } else {
    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
  }
  return OK;
}

// Instantiate the ForceEarlyReturn templates.
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jint);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jlong);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jfloat);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jdouble);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jobject);
template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, nullptr_t);

}  // namespace openjdkjvmti