/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "arch/context.h"
#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "jni/jni_env_ext.h"
#include "jni/jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "ti_logging.h"
#include "ti_thread.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) noexcept = default;

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

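// Note on the visitor above: it skips "start" non-runtime frames from the top
// of the stack, then reports frames through "fn" until "stop" more frames have
// been delivered, with stop == 0 meaning "no limit". For example, start = 2
// and stop = 3 report the frames at depths 2, 3, and 4.
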
Alex Light0aa7a5a2018-10-10 15:58:14 +0000117art::ShadowFrame* FindFrameAtDepthVisitor::GetOrCreateShadowFrame(bool* created_frame) {
118 art::ShadowFrame* cur = GetCurrentShadowFrame();
119 if (cur == nullptr) {
120 *created_frame = true;
121 art::ArtMethod* method = GetMethod();
122 const uint16_t num_regs = method->DexInstructionData().RegistersSize();
123 cur = GetThread()->FindOrCreateDebuggerShadowFrame(GetFrameId(),
124 num_regs,
125 method,
126 GetDexPc());
127 DCHECK(cur != nullptr);
128 } else {
129 *created_frame = false;
130 }
131 return cur;
132}
133
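// A shadow frame is the interpreter's frame representation. Creating a
// "debugger" shadow frame here registers interpreter state for a frame that is
// currently running compiled code; "created_frame" tells the caller that the
// thread still has to be instrumented/deoptimized before the frame is actually
// used (see NotifyFramePop and PopFrame below).
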
template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

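// For a negative start_depth the frames are counted from the bottom of the
// stack, per the JVMTI GetStackTrace spec. For example, with
// collected_frames = 5 and start_depth = -2, the copy above starts at index
// 5 + (-2) = 3 and returns at most two frames, i.e. the two oldest frames on
// the stack (subject to max_frame_count).
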
struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  } else {
    GetStackTraceVectorClosure closure(0, 0);
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }

    return TranslateFrameVector(closure.frames,
                                start_depth,
                                closure.start_result,
                                max_frame_count,
                                frame_buffer,
                                count_ptr);
  }
}

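// Illustrative agent-side use of the entry point above (a sketch, not part of
// this file; it assumes a valid jvmtiEnv* "jvmti" and a jthread "thread"
// obtained elsewhere):
//
//   jvmtiFrameInfo frames[16];
//   jint count = 0;
//   jvmtiError err = jvmti->GetStackTrace(thread, /* start_depth */ 0,
//                                         /* max_frame_count */ 16, frames, &count);
//   if (err == JVMTI_ERROR_NONE) {
//     for (jint i = 0; i < count; i++) {
//       char* name = nullptr;
//       jvmti->GetMethodName(frames[i].method, &name, nullptr, nullptr);
//       // ... use name, then jvmti->Deallocate(reinterpret_cast<unsigned char*>(name));
//     }
//   }
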
template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) override
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions= */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  // Note: requires the mutator lock as the checkpoint requires the mutator lock.
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}

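// The checkpoint/barrier handshake above works as follows: RunCheckpoint
// queues the closure on every thread and returns how many threads will run it.
// Each thread calls barrier.Pass() once its stack walk is done, and the caller
// blocks in barrier.Increment() until that many passes have happened, so all
// per-thread frame vectors are complete before they are read.
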
jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  art::Thread* current = art::Thread::Current();
  {
    art::ScopedObjectAccess soa(current);
    RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
  }

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}

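// The returned chunk is a single env->Allocate()d block laid out as
//
//   | jvmtiStackInfo[thread_count] | padding | jvmtiFrameInfo frames... |
//
// with each stack_info.frame_buffer pointing into the trailing frame area, so
// one Deallocate call frees everything. Illustrative agent-side use (a
// sketch, assuming a valid jvmtiEnv* "jvmti"):
//
//   jvmtiStackInfo* infos = nullptr;
//   jint thread_count = 0;
//   if (jvmti->GetAllStackTraces(/* max_frame_count */ 32, &infos, &thread_count) ==
//       JVMTI_ERROR_NONE) {
//     // ... inspect infos[0 .. thread_count), then:
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(infos));
//   }
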
jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);  // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer. Note that the stack_info array is
  // sized by thread_count, which may be larger than data.frames.size().
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
                                        JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

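// Illustrative agent-side use (a sketch, assuming a valid jvmtiEnv* "jvmti"
// and two jthread handles "t1" and "t2" obtained elsewhere):
//
//   jthread threads[] = { t1, t2 };
//   jvmtiStackInfo* infos = nullptr;
//   if (jvmti->GetThreadListStackTraces(2, threads, /* max_frame_count */ 32, &infos) ==
//       JVMTI_ERROR_NONE) {
//     // infos[i] corresponds to threads[i]; threads that never started or have
//     // already terminated come back with frame_count == 0.
//     jvmti->Deallocate(reinterpret_cast<unsigned char*>(infos));
//   }
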
struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    // This is not StackVisitor::ComputeNumFrames, as runtime methods and transitions must not be
    // counted.
    art::StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
          art::ArtMethod* m = stack_visitor->GetMethod();
          if (m != nullptr && !m->IsRuntimeMethod()) {
            count++;
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }

  DCHECK(thread != nullptr);
  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  *count_ptr = closure.count;
  return ERR(NONE);
}

struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    // Walks up the stack 'n' callers.
    size_t count = 0u;
    art::StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
          art::ArtMethod* m = stack_visitor->GetMethod();
          if (m != nullptr && !m->IsRuntimeMethod()) {
            DCHECK(method == nullptr);
            if (count == n) {
              method = m;
              dex_pc = stack_visitor->GetDexPc(/*abort_on_failure=*/false);
              return false;
            }
            count++;
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (depth < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::dex::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}

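// Note: in ART the jlocation reported above is the dex pc of the current
// instruction in the frame's method; -1 is used for native and proxy methods,
// matching the JVMTI convention that such frames have no usable location.
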
struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
  // We need a context because VisitLocks needs it to retrieve the monitor objects.
  explicit MonitorVisitor(art::Thread* thread)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      : art::StackVisitor(thread,
                          art::Context::Create(),
                          art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        hs(art::Thread::Current()),
        current_stack_depth(0) {}

  ~MonitorVisitor() {
    delete context_;
  }

  bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    if (!GetMethod()->IsRuntimeMethod()) {
      art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
      ++current_stack_depth;
    }
    return true;
  }

  static void AppendOwnedMonitors(art::mirror::Object* owned_monitor, void* arg)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
    art::ObjPtr<art::mirror::Object> mon(owned_monitor);
    // Filter out duplicates.
    for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
      if (monitor.Get() == mon.Ptr()) {
        return;
      }
    }
    visitor->monitors.push_back(visitor->hs.NewHandle(mon));
    visitor->stack_depths.push_back(visitor->current_stack_depth);
  }

  void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (const art::Handle<art::mirror::Object>& m : monitors) {
      if (m.Get() == obj) {
        return;
      }
    }
    monitors.push_back(hs.NewHandle(obj));
    stack_depths.push_back(-1);
  }

  art::VariableSizedHandleScope hs;
  jint current_stack_depth;
  std::vector<art::Handle<art::mirror::Object>> monitors;
  std::vector<jint> stack_depths;
};

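// MonitorVisitor gathers a thread's owned monitors from two sources: frames on
// the managed stack (via Monitor::VisitLocks, recording the frame depth) and
// monitors entered through JNI (via the VisitRoot override, recorded with
// depth -1 since they have no associated stack frame).
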
template<typename Fn>
struct MonitorInfoClosure : public art::Closure {
 public:
  explicit MonitorInfoClosure(Fn handle_results)
      : err_(OK), handle_results_(handle_results) {}

  void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    // Find the monitors on the stack.
    MonitorVisitor visitor(target);
    visitor.WalkStack(/* include_transitions= */ false);
    // Find any other monitors, including ones acquired in native code.
    art::RootInfo root_info(art::kRootVMInternal);
    target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
    err_ = handle_results_(visitor);
  }

  jvmtiError GetError() {
    return err_;
  }

 private:
  jvmtiError err_;
  Fn handle_results_;
};

template <typename Fn>
static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
                                            jthread thread,
                                            Fn handle_results)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  art::Thread* self = art::Thread::Current();
  MonitorInfoClosure<Fn> closure(handle_results);
  bool called_method = false;
  {
    art::Locks::thread_list_lock_->ExclusiveLock(self);
    art::Thread* target = nullptr;
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
      return err;
    }
    if (target != self) {
      called_method = true;
      // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
      // Since this deals with object references we need to avoid going to sleep.
      art::ScopedAssertNoThreadSuspension sants("Getting owned monitor usage");
      if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
        return ERR(THREAD_NOT_ALIVE);
      }
    } else {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    }
  }
  // Cannot call the closure on the current thread if we have thread_list_lock since we need to
  // call into the verifier which can cause the current thread to suspend for gc. Suspending would
  // be a bad thing to do if we hold the ThreadListLock. For other threads since we are running it
  // on a checkpoint we are fine but if the thread is the current one we need to drop the mutex
  // first.
  if (!called_method) {
    closure.Run(self);
  }
  return closure.GetError();
}

jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
                                                    jthread thread,
                                                    jint* info_cnt,
                                                    jvmtiMonitorStackDepthInfo** info_ptr) {
  if (info_cnt == nullptr || info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  std::vector<uint32_t> depths;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
      depths.push_back(visitor.stack_depths[i]);
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
  if (err != OK) {
    return err;
  }
  *info_cnt = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*info_ptr)[i] = {
      soa.AddLocalReference<jobject>(mons[i].Read()),
      static_cast<jint>(depths[i])
    };
  }
  return err;
}

jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                          jthread thread,
                                          jint* owned_monitor_count_ptr,
                                          jobject** owned_monitors_ptr) {
  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jobject) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
  if (err != OK) {
    return err;
  }
  *owned_monitor_count_ptr = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
  }
  return err;
}

jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
  art::Thread* self = art::Thread::Current();
  art::Thread* target;
  do {
    ThreadUtil::SuspendCheck(self);
    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
    // Make sure we won't be suspended in the middle of holding the thread_suspend_count_lock_ by
    // a user-code suspension. We retry and do another SuspendCheck to clear this.
    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
      continue;
    }
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
    // have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      return err;
    }
    if (target != self) {
      // TODO This is part of the spec but we could easily avoid needing to do it. We would just
      // put all the logic into a sync-checkpoint.
      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
      if (target->GetUserCodeSuspendCount() == 0) {
        return ERR(THREAD_NOT_SUSPENDED);
      }
    }
    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we
    // are done (unless it's 'self' in which case we don't care since we aren't going to be
    // returning).
    // TODO We could implement this using a synchronous checkpoint and not bother with any of the
    // suspension stuff. The spec does specifically say to return THREAD_NOT_SUSPENDED though.
    // Find the requested stack frame.
    std::unique_ptr<art::Context> context(art::Context::Create());
    FindFrameAtDepthVisitor visitor(target, context.get(), depth);
    visitor.WalkStack();
    if (!visitor.FoundFrame()) {
      return ERR(NO_MORE_FRAMES);
    }
    art::ArtMethod* method = visitor.GetMethod();
    if (method->IsNative()) {
      return ERR(OPAQUE_FRAME);
    }
    // From here we are sure to succeed.
    bool needs_instrument = false;
    // Get/create a shadow frame.
    art::ShadowFrame* shadow_frame = visitor.GetOrCreateShadowFrame(&needs_instrument);
    {
      art::WriterMutexLock lk(self, tienv->event_info_mutex_);
      // Mark the shadow frame as needs_notify_pop_.
      shadow_frame->SetNotifyPop(true);
      tienv->notify_frames.insert(shadow_frame);
    }
    // Make sure we will go to the interpreter and use the shadow frames.
    if (needs_instrument) {
      art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
    }
    return OK;
  } while (true);
}

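// Illustrative agent-side use of NotifyFramePop (a sketch, assuming a valid
// jvmtiEnv* "jvmti", a jthread "thread" the agent has already suspended, and
// that can_generate_frame_pop_events was requested in the capabilities):
//
//   // Ask for a FramePop event when the current top frame of "thread" returns.
//   jvmtiError err = jvmti->NotifyFramePop(thread, /* depth */ 0);
//   // The registered FramePop event callback then fires when that frame pops.
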
jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
  art::Thread* self = art::Thread::Current();
  art::Thread* target;
  do {
    ThreadUtil::SuspendCheck(self);
    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
    // Make sure we won't be suspended in the middle of holding the thread_suspend_count_lock_ by
    // a user-code suspension. We retry and do another SuspendCheck to clear this.
    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
      continue;
    }
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
    // have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      return err;
    }
    {
      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
      if (target == self || target->GetUserCodeSuspendCount() == 0) {
        // We cannot be the current thread for this function.
        return ERR(THREAD_NOT_SUSPENDED);
      }
    }
    JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target);
    constexpr art::StackVisitor::StackWalkKind kWalkKind =
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
    if (tls_data != nullptr &&
        tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
        tls_data->disable_pop_frame_depth == art::StackVisitor::ComputeNumFrames(target,
                                                                                 kWalkKind)) {
      JVMTI_LOG(WARNING, env) << "Disallowing frame pop due to in-progress class-load/prepare. "
                              << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
                              << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
                              << "more information.";
      return ERR(OPAQUE_FRAME);
    }
    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we
    // are done.
    std::unique_ptr<art::Context> context(art::Context::Create());
    FindFrameAtDepthVisitor final_frame(target, context.get(), 0);
    FindFrameAtDepthVisitor penultimate_frame(target, context.get(), 1);
    final_frame.WalkStack();
    penultimate_frame.WalkStack();

    if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
      // Cannot do it if there is only one frame!
      return ERR(NO_MORE_FRAMES);
    }

    art::ArtMethod* called_method = final_frame.GetMethod();
    art::ArtMethod* calling_method = penultimate_frame.GetMethod();
    if (calling_method->IsNative() || called_method->IsNative()) {
      return ERR(OPAQUE_FRAME);
    }
    // From here we are sure to succeed.

    // Get/create shadow frames.
    bool created_final_frame = false;
    bool created_penultimate_frame = false;
    art::ShadowFrame* called_shadow_frame =
        final_frame.GetOrCreateShadowFrame(&created_final_frame);
    art::ShadowFrame* calling_shadow_frame =
        penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame);

    CHECK_NE(called_shadow_frame, calling_shadow_frame)
        << "Frames at different depths not different!";

    // Tell the shadow-frame to return immediately and skip all exit events.
    called_shadow_frame->SetForcePopFrame(true);
    calling_shadow_frame->SetForceRetryInstruction(true);

    // Make sure we will go to the interpreter and use the shadow frames. The early return for
    // the final frame will force everything to the interpreter so we only need to instrument if
    // it was not present.
    if (created_final_frame) {
      DeoptManager::Get()->DeoptimizeThread(target);
    }
    return OK;
  } while (true);
}

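// PopFrame discards the top frame: the callee's shadow frame is flagged to
// return immediately without running exit events, and the caller's shadow
// frame is flagged to re-execute its current (invoke) instruction, so after
// the thread resumes the call appears never to have happened, as the JVMTI
// PopFrame specification requires.
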
}  // namespace openjdkjvmti