blob: 68fbf69fd7347ab147c23dde88f3fc8ab759205c [file] [log] [blame]
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_SRC_SCOPED_THREAD_STATE_CHANGE_H_
18#define ART_SRC_SCOPED_THREAD_STATE_CHANGE_H_
19
20#include "casts.h"
21#include "thread.h"
22
23namespace art {
24
25// Scoped change into and out of a particular state. Handles Runnable transitions that require
26// more complicated suspension checking. The subclasses ScopedObjectAccessUnchecked and
27// ScopedObjectAccess are used to handle the change into Runnable to get direct access to objects,
28// the unchecked variant doesn't aid annotalysis.
class ScopedThreadStateChange {
 public:
  // Transitions |self| from its current state to |new_thread_state| on construction, and back to
  // the original state in the destructor. Transitions into or out of kRunnable are routed through
  // TransitionFromSuspendedToRunnable / TransitionFromRunnableToSuspended; transitions between two
  // non-Runnable states use the Unsafe setters since no suspension handshake is needed.
  // |self| may be NULL, but only while the runtime is not started or is shutting down (CHECKed).
  ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
    if (self_ == NULL) {
      // Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL.
      old_thread_state_ = kTerminated;
      CHECK(!Runtime::Current()->IsStarted() || Runtime::Current()->IsShuttingDown());
    } else {
      bool runnable_transition;
      // Must be constructed on the thread whose state is being changed.
      DCHECK_EQ(self, Thread::Current());
      // Read state without locks, ok as state is effectively thread local and we're not interested
      // in the suspend count (this will be handled in the runnable transitions).
      old_thread_state_ = self->GetStateUnsafe();
      runnable_transition = old_thread_state_ == kRunnable || new_thread_state == kRunnable;
      if (!runnable_transition) {
        // A suspended transition to another effectively suspended transition, ok to use Unsafe.
        self_->SetStateUnsafe(new_thread_state);
      }
      if (runnable_transition && old_thread_state_ != new_thread_state) {
        if (new_thread_state == kRunnable) {
          self_->TransitionFromSuspendedToRunnable();
        } else {
          // The only remaining runnable transition is leaving kRunnable.
          DCHECK_EQ(old_thread_state_, kRunnable);
          self_->TransitionFromRunnableToSuspended(new_thread_state);
        }
      }
    }
  }

  // Restores the thread state captured at construction, mirroring the constructor's logic:
  // full transitions when kRunnable is involved, Unsafe state writes otherwise.
  ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
    if (self_ == NULL) {
      // Only the protected default constructor (expected_has_no_thread_ == true) may legitimately
      // have no thread outside of runtime shutdown.
      if (!expected_has_no_thread_) {
        CHECK(Runtime::Current()->IsShuttingDown());
      }
    } else {
      if (old_thread_state_ != thread_state_) {
        if (old_thread_state_ == kRunnable) {
          self_->TransitionFromSuspendedToRunnable();
        } else if (thread_state_ == kRunnable) {
          self_->TransitionFromRunnableToSuspended(old_thread_state_);
        } else {
          // A suspended transition to another effectively suspended transition, ok to use Unsafe.
          self_->SetStateUnsafe(old_thread_state_);
        }
      }
    }
  }

  // The thread whose state this object changed; may be NULL (see constructors).
  Thread* Self() const {
    return self_;
  }

 protected:
  // Constructor used by ScopedJniThreadState for an unattached thread that has access to the VM*.
  ScopedThreadStateChange()
      : self_(NULL), thread_state_(kTerminated), old_thread_state_(kTerminated),
        expected_has_no_thread_(true) {}

  Thread* const self_;            // Thread being transitioned, or NULL.
  const ThreadState thread_state_;  // State entered for the lifetime of this object.

 private:
  ThreadState old_thread_state_;      // State to restore in the destructor.
  const bool expected_has_no_thread_;  // True only for the protected no-thread constructor.

  DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
};
98
99// Entry/exit processing for transitions from Native to Runnable (ie within JNI functions).
100//
101// This class performs the necessary thread state switching to and from Runnable and lets us
102// amortize the cost of working out the current thread. Additionally it lets us check (and repair)
103// apps that are using a JNIEnv on the wrong thread. The class also decodes and encodes Objects
104// into jobjects via methods of this class. Performing this here enforces the Runnable thread state
105// for use of Object, thereby inhibiting the Object being modified by GC whilst native or VM code
106// is also manipulating the Object.
107//
108// The destructor transitions back to the previous thread state, typically Native. In this state
109// GC and thread suspension may occur.
110//
// For annotalysis the subclass ScopedObjectAccess (below) makes it explicit that a share of
// the mutator_lock_ will be acquired on construction.
class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
 public:
  // Enters kRunnable for the thread associated with |env|. ThreadForEnv also detects (and, with
  // the app-JNI-bugs work-around, repairs) a JNIEnv being used on the wrong thread.
  explicit ScopedObjectAccessUnchecked(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      : ScopedThreadStateChange(ThreadForEnv(env), kRunnable),
        env_(reinterpret_cast<JNIEnvExt*>(env)), vm_(env_->vm) {
    self_->VerifyStack();
  }

  // Enters kRunnable for |self|, checking that |self| really is the calling thread unless the
  // app-JNI-bugs work-around is enabled.
  // NOTE(review): self_->VerifyStack() is called without a NULL check even though the base class
  // tolerates a NULL thread — presumably callers never pass NULL here; confirm against call sites.
  explicit ScopedObjectAccessUnchecked(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      : ScopedThreadStateChange(self, kRunnable),
        env_(reinterpret_cast<JNIEnvExt*>(self->GetJniEnv())),
        vm_(env_ != NULL ? env_->vm : NULL) {
    if (Vm() != NULL && !Vm()->work_around_app_jni_bugs && self != Thread::Current()) {
      UnexpectedThreads(self, Thread::Current());
    }
    self_->VerifyStack();
  }

  // Used when we want a scoped JNI thread state but have no thread/JNIEnv. Consequently doesn't
  // change into Runnable or acquire a share on the mutator_lock_.
  explicit ScopedObjectAccessUnchecked(JavaVM* vm)
      : ScopedThreadStateChange(), env_(NULL), vm_(reinterpret_cast<JavaVMExt*>(vm)) {}

  // The full JNIEnv; NULL when constructed from a bare JavaVM.
  JNIEnvExt* Env() const {
    return env_;
  }

  // The full JavaVM; may be NULL (see the Thread* constructor).
  JavaVMExt* Vm() const {
    return vm_;
  }

  /*
   * Add a local reference for an object to the indirect reference table associated with the
   * current stack frame. When the native function returns, the reference will be discarded.
   * Part of the ScopedJniThreadState as native code shouldn't be working on raw Object* without
   * having transitioned its state.
   *
   * We need to allow the same reference to be added multiple times.
   *
   * This will be called on otherwise unreferenced objects. We cannot do GC allocations here, and
   * it's best if we don't grab a mutex.
   *
   * Returns the local reference (currently just the same pointer that was
   * passed in), or NULL on failure.
   */
  template<typename T>
  T AddLocalReference(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
    if (obj == NULL) {
      return NULL;
    }

    // Reject pointers matching 0xebadXXXX — presumably a debug fill/poison pattern; TODO confirm.
    DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);

    IndirectReferenceTable& locals = Env()->locals;

    // The cookie marks the current local-reference frame; Add scopes the new ref to it.
    uint32_t cookie = Env()->local_ref_cookie;
    IndirectRef ref = locals.Add(cookie, obj);

#if 0  // TODO: fix this to understand PushLocalFrame, so we can turn it on.
    if (Env()->check_jni) {
      size_t entry_count = locals.Capacity();
      if (entry_count > 16) {
        LOG(WARNING) << "Warning: more than 16 JNI local references: "
                     << entry_count << " (most recent was a " << PrettyTypeOf(obj) << ")\n"
                     << Dumpable<IndirectReferenceTable>(locals);
        // TODO: LOG(FATAL) in a later release?
      }
    }
#endif

    if (Vm()->work_around_app_jni_bugs) {
      // Hand out direct pointers to support broken old apps.
      return reinterpret_cast<T>(obj);
    }

    return reinterpret_cast<T>(ref);
  }

  // Decodes |obj| (a jobject) into a raw Object* subtype T via the owning thread's reference
  // tables. Requires a share of the mutator lock so the result can't move under us.
  template<typename T>
  T Decode(jobject obj) const
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld();
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
    return down_cast<T>(Self()->DecodeJObject(obj));
  }

  // Decodes a jfieldID back to a Field*. Currently a plain pointer reinterpretation, which is
  // only sound while Field objects never move.
  Field* DecodeField(jfieldID fid) const
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld();
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: we should make these unique weak globals if Field instances can ever move.
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<Field*>(fid);
  }

  // Encodes a Field* as a jfieldID; inverse of DecodeField, same non-moving assumption.
  jfieldID EncodeField(Field* field) const
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld();
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<jfieldID>(field);
  }

  // Decodes a jmethodID back to a Method*; same pointer-reinterpretation scheme as DecodeField.
  Method* DecodeMethod(jmethodID mid) const
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld();
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: we should make these unique weak globals if Method instances can ever move.
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<Method*>(mid);
  }

  // Encodes a Method* as a jmethodID; inverse of DecodeMethod.
  jmethodID EncodeMethod(Method* method) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Locks::mutator_lock_->AssertSharedHeld();
    DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
#ifdef MOVING_GARBAGE_COLLECTOR
    UNIMPLEMENTED(WARNING);
#endif
    return reinterpret_cast<jmethodID>(method);
  }

 private:
  // Resolves the Thread* for |env|. With the app-JNI-bugs work-around the actual current thread
  // is used (repairing wrong-thread usage); otherwise a mismatch between the env's recorded
  // thread and the current thread aborts via UnexpectedThreads.
  static Thread* ThreadForEnv(JNIEnv* env) {
    JNIEnvExt* full_env(reinterpret_cast<JNIEnvExt*>(env));
    bool work_around_app_jni_bugs = full_env->vm->work_around_app_jni_bugs;
    Thread* env_self = full_env->self;
    Thread* self = work_around_app_jni_bugs ? Thread::Current() : env_self;
    if (!work_around_app_jni_bugs && self != env_self) {
      UnexpectedThreads(env_self, self);
    }
    return self;
  }

  // Aborts (via JniAbortF) reporting a JNIEnv used on a thread other than the one it belongs to.
  static void UnexpectedThreads(Thread* found_self, Thread* expected_self) {
    // TODO: pass through function name so we can use it here instead of NULL...
    JniAbortF(NULL, "JNIEnv for %s used on %s",
              found_self != NULL ? ToStr<Thread>(*found_self).c_str() : "NULL",
              expected_self != NULL ? ToStr<Thread>(*expected_self).c_str() : "NULL");
  }

  // The full JNIEnv.
  JNIEnvExt* const env_;
  // The full JavaVM.
  JavaVMExt* const vm_;

  DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccessUnchecked);
};
280
// Variant of the above whose annotations make the mutator_lock_ share acquisition visible to
// annotalysis.
class ScopedObjectAccess : public ScopedObjectAccessUnchecked {
 public:
  // Same as the base class, but the SHARED_LOCK_FUNCTION annotation tells annotalysis that a
  // share of the mutator lock is acquired here; the runtime assertion double-checks it.
  explicit ScopedObjectAccess(JNIEnv* env)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      : ScopedObjectAccessUnchecked(env) {
    Locks::mutator_lock_->AssertSharedHeld();
  }

  // As above, for a known Thread* rather than a JNIEnv*.
  explicit ScopedObjectAccess(Thread* self)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      : ScopedObjectAccessUnchecked(self) {
    Locks::mutator_lock_->AssertSharedHeld();
  }

  // Annotated as releasing the mutator lock share; the actual release happens in the base class
  // destructor, which runs after this one.
  ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) {
    // Base class will release share of lock. Invoked after this destructor.
  }

 private:
  // TODO: remove this constructor. It is used by check JNI's ScopedCheck to make it believe that
  // routines operating with just a VM are sound, they are not, but when you have just a VM
  // you cannot call the unsound routines.
  explicit ScopedObjectAccess(JavaVM* vm)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      : ScopedObjectAccessUnchecked(vm) {}

  friend class ScopedCheck;
  DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccess);
};
313
314} // namespace art
315
316#endif // ART_SRC_SCOPED_THREAD_STATE_CHANGE_H_