blob: 31f178d079e7dab0f7b669dc34556eb792702f4e [file] [log] [blame]
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#ifndef ART_SRC_SCOPED_THREAD_STATE_CHANGE_H_
18#define ART_SRC_SCOPED_THREAD_STATE_CHANGE_H_
19
Elliott Hughes1aa246d2012-12-13 09:29:36 -080020#include "base/casts.h"
Ian Rogers81d425b2012-09-27 16:03:43 -070021#include "jni_internal.h"
Ian Rogers693ff612013-02-01 10:56:12 -080022#include "thread-inl.h"
Ian Rogers00f7d0e2012-07-19 15:28:27 -070023
24namespace art {
25
// Scoped change into and out of a particular state. Handles Runnable transitions that require
// more complicated suspension checking. The subclasses ScopedObjectAccessUnchecked and
// ScopedObjectAccess are used to handle the change into Runnable to get direct access to objects,
// the unchecked variant doesn't aid annotalysis.
class ScopedThreadStateChange {
 public:
  // Transitions |self| to |new_thread_state| for the lifetime of this object; the destructor
  // restores the previous state. |self| may be NULL only during runtime startup/shutdown,
  // which is verified under the runtime shutdown lock.
  ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
    if (self_ == NULL) {
      // Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL.
      old_thread_state_ = kTerminated;
      // A NULL thread is only legitimate when no started runtime exists; check that under the
      // shutdown lock so the runtime can't start/stop concurrently with the check.
      MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
      Runtime* runtime = Runtime::Current();
      CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown());
    } else {
      bool runnable_transition;
      DCHECK_EQ(self, Thread::Current());
      // Read state without locks, ok as state is effectively thread local and we're not interested
      // in the suspend count (this will be handled in the runnable transitions).
      old_thread_state_ = self->GetState();
      runnable_transition = old_thread_state_ == kRunnable || new_thread_state == kRunnable;
      if (!runnable_transition) {
        // A suspended transition to another effectively suspended transition, ok to use Unsafe.
        self_->SetState(new_thread_state);
      }

      // Transitions into or out of kRunnable must go through the Thread helpers, which handle
      // suspension checking; a same-state "transition" is a no-op.
      if (runnable_transition && old_thread_state_ != new_thread_state) {
        if (new_thread_state == kRunnable) {
          self_->TransitionFromSuspendedToRunnable();
        } else {
          DCHECK_EQ(old_thread_state_, kRunnable);
          self_->TransitionFromRunnableToSuspended(new_thread_state);
        }
      }
    }
  }

  // Restores the thread state captured at construction, mirroring the constructor's
  // runnable/non-runnable transition logic in reverse.
  ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
    if (self_ == NULL) {
      if (!expected_has_no_thread_) {
        // We were built with a NULL thread but did expect one (expected_has_no_thread_ is false);
        // this is only tolerable if the runtime is gone or shutting down.
        MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
        Runtime* runtime = Runtime::Current();
        bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
        CHECK(shutting_down);
      }
    } else {
      if (old_thread_state_ != thread_state_) {
        if (old_thread_state_ == kRunnable) {
          self_->TransitionFromSuspendedToRunnable();
        } else if (thread_state_ == kRunnable) {
          self_->TransitionFromRunnableToSuspended(old_thread_state_);
        } else {
          // A suspended transition to another effectively suspended transition, ok to use Unsafe.
          self_->SetState(old_thread_state_);
        }
      }
    }
  }

  // The thread this scope is operating on (may be NULL, see the constructors).
  Thread* Self() const {
    return self_;
  }

 protected:
  // Constructor used by ScopedJniThreadState for an unattached thread that has access to the VM*.
  ScopedThreadStateChange()
      : self_(NULL), thread_state_(kTerminated), old_thread_state_(kTerminated),
        expected_has_no_thread_(true) {}

  Thread* const self_;
  // The state this scope transitioned into (target state for the scope's lifetime).
  const ThreadState thread_state_;

 private:
  // The state to restore in the destructor.
  ThreadState old_thread_state_;
  // True only for the protected default constructor, where self_ == NULL is expected.
  const bool expected_has_no_thread_;

  DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
};
105
// Entry/exit processing for transitions from Native to Runnable (ie within JNI functions).
//
// This class performs the necessary thread state switching to and from Runnable and lets us
// amortize the cost of working out the current thread. Additionally it lets us check (and repair)
// apps that are using a JNIEnv on the wrong thread. The class also decodes and encodes Objects
// into jobjects via methods of this class. Performing this here enforces the Runnable thread state
// for use of Object, thereby inhibiting the Object being modified by GC whilst native or VM code
// is also manipulating the Object.
//
// The destructor transitions back to the previous thread state, typically Native. In this state
// GC and thread suspension may occur.
//
// For annotalysis the subclass ScopedObjectAccess (below) makes it explicit that a shared of
// the mutator_lock_ will be acquired on construction.
class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
 public:
  // Enters Runnable for the thread owning |env|; ThreadForEnv repairs/reports wrong-thread use.
  explicit ScopedObjectAccessUnchecked(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      : ScopedThreadStateChange(ThreadForEnv(env), kRunnable),
        env_(reinterpret_cast<JNIEnvExt*>(env)), vm_(env_->vm) {
    self_->VerifyStack();
  }

  // Enters Runnable for an explicitly supplied thread; its JNIEnv may be NULL (vm_ then NULL too).
  explicit ScopedObjectAccessUnchecked(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      : ScopedThreadStateChange(self, kRunnable),
        env_(reinterpret_cast<JNIEnvExt*>(self->GetJniEnv())),
        vm_(env_ != NULL ? env_->vm : NULL) {
    // Unless the app-JNI-bugs workaround is active, |self| must be the calling thread.
    if (Vm() != NULL && !Vm()->work_around_app_jni_bugs && self != Thread::Current()) {
      UnexpectedThreads(self, Thread::Current());
    }
    self_->VerifyStack();
  }

  // Used when we want a scoped JNI thread state but have no thread/JNIEnv. Consequently doesn't
  // change into Runnable or acquire a share on the mutator_lock_.
  explicit ScopedObjectAccessUnchecked(JavaVM* vm)
      : ScopedThreadStateChange(), env_(NULL), vm_(reinterpret_cast<JavaVMExt*>(vm)) {}

  // The full JNIEnv (may be NULL for the JavaVM-only constructor).
  JNIEnvExt* Env() const {
    return env_;
  }

  // The full JavaVM (may be NULL when constructed from a thread with no JNIEnv).
  JavaVMExt* Vm() const {
    return vm_;
  }

  /*
   * Add a local reference for an object to the indirect reference table associated with the
   * current stack frame. When the native function returns, the reference will be discarded.
   * Part of the ScopedJniThreadState as native code shouldn't be working on raw Object* without
   * having transitioned its state.
   *
   * We need to allow the same reference to be added multiple times.
   *
   * This will be called on otherwise unreferenced objects. We cannot do GC allocations here, and
   * it's best if we don't grab a mutex.
   *
   * Returns the local reference (currently just the same pointer that was
   * passed in), or NULL on failure.
   */
  template<typename T>
  T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
    if (obj == NULL) {
      return NULL;
    }

    // NOTE(review): 0xebad.... looks like a debug poison pattern for bad/stale references —
    // confirm against the indirect reference table's invalid-ref constants.
    DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);

    IndirectReferenceTable& locals = Env()->locals;

    uint32_t cookie = Env()->local_ref_cookie;
    IndirectRef ref = locals.Add(cookie, obj);

#if 0  // TODO: fix this to understand PushLocalFrame, so we can turn it on.
    if (Env()->check_jni) {
      size_t entry_count = locals.Capacity();
      if (entry_count > 16) {
        LOG(WARNING) << "Warning: more than 16 JNI local references: "
                     << entry_count << " (most recent was a " << PrettyTypeOf(obj) << ")\n"
                     << Dumpable<IndirectReferenceTable>(locals);
        // TODO: LOG(FATAL) in a later release?
      }
    }
#endif

    if (Vm()->work_around_app_jni_bugs) {
      // Hand out direct pointers to support broken old apps.
      return reinterpret_cast<T>(obj);
    }

    return reinterpret_cast<T>(ref);
  }

  // Decode a jobject (local/global/weak-global reference) back to a raw Object pointer of type T.
  // Only valid while Runnable with a share of the mutator lock held.
  template<typename T>
  T Decode(jobject obj) const
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
    return down_cast<T>(Self()->DecodeJObject(obj));
  }

  // Decode a jfieldID to a Field*. Currently a plain cast: jfieldIDs are Field pointers.
  mirror::Field* DecodeField(jfieldID fid) const
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: we should make these unique weak globals if Field instances can ever move.
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<mirror::Field*>(fid);
  }

  // Encode a Field* as a jfieldID (inverse of DecodeField; a plain cast today).
  jfieldID EncodeField(mirror::Field* field) const
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<jfieldID>(field);
  }

  // Decode a jmethodID to an AbstractMethod*. Currently a plain cast, mirroring DecodeField.
  mirror::AbstractMethod* DecodeMethod(jmethodID mid) const
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: we should make these unique weak globals if Method instances can ever move.
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<mirror::AbstractMethod*>(mid);
  }

  // Encode an AbstractMethod* as a jmethodID (inverse of DecodeMethod; a plain cast today).
  jmethodID EncodeMethod(mirror::AbstractMethod* method) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<jmethodID>(method);
  }

 private:
  // Works out which Thread the JNIEnv belongs to. With the app-JNI-bugs workaround enabled the
  // calling thread is used regardless; otherwise a mismatch between the env's thread and the
  // calling thread is reported via UnexpectedThreads.
  static Thread* ThreadForEnv(JNIEnv* env) {
    JNIEnvExt* full_env(reinterpret_cast<JNIEnvExt*>(env));
    bool work_around_app_jni_bugs = full_env->vm->work_around_app_jni_bugs;
    Thread* env_self = full_env->self;
    Thread* self = work_around_app_jni_bugs ? Thread::Current() : env_self;
    if (!work_around_app_jni_bugs && self != env_self) {
      UnexpectedThreads(env_self, self);
    }
    return self;
  }

  // Report (via JniAbortF) that a JNIEnv was used on a thread other than the one it belongs to.
  static void UnexpectedThreads(Thread* found_self, Thread* expected_self) {
    // TODO: pass through function name so we can use it here instead of NULL...
    JniAbortF(NULL, "JNIEnv for %s used on %s",
              found_self != NULL ? ToStr<Thread>(*found_self).c_str() : "NULL",
              expected_self != NULL ? ToStr<Thread>(*expected_self).c_str() : "NULL");

  }

  // The full JNIEnv.
  JNIEnvExt* const env_;
  // The full JavaVM.
  JavaVMExt* const vm_;

  DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccessUnchecked);
};
286
// Annotalysis helping variant of the above.
class ScopedObjectAccess : public ScopedObjectAccessUnchecked {
 public:
  // Same as the base-class JNIEnv* constructor, but carries SHARED_LOCK_FUNCTION so annotalysis
  // knows a share of the mutator lock is held for this object's lifetime.
  explicit ScopedObjectAccess(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      : ScopedObjectAccessUnchecked(env) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
  }

  // As above, for an explicitly supplied thread.
  explicit ScopedObjectAccess(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      : ScopedObjectAccessUnchecked(self) {
    Locks::mutator_lock_->AssertSharedHeld(Self());
  }

  ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) {
    // Base class will release share of lock. Invoked after this destructor.
  }

 private:
  // TODO: remove this constructor. It is used by check JNI's ScopedCheck to make it believe that
  // routines operating with just a VM are sound, they are not, but when you have just a VM
  // you cannot call the unsound routines.
  explicit ScopedObjectAccess(JavaVM* vm)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      : ScopedObjectAccessUnchecked(vm) {}

  friend class ScopedCheck;
  DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccess);
};
319
320} // namespace art
321
322#endif // ART_SRC_SCOPED_THREAD_STATE_CHANGE_H_