/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_

#include <pthread.h>

#include <bitset>
#include <iosfwd>
#include <list>
#include <string>

#include "dex_file.h"
#include "globals.h"
#include "jni_internal.h"
#include "logging.h"
#include "macros.h"
#include "mutex.h"
#include "mem_map.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "UniquePtr.h"

namespace art {

class Array;
class Class;
class ClassLinker;
class ClassLoader;
class Context;
class Method;
class Monitor;
class Object;
class Runtime;
class Thread;
class ThreadList;
class Throwable;
class StackIndirectReferenceTable;
class StackTraceElement;
class StaticStorageBase;

template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;

class PACKED Thread {
 public:
  /* thread priorities, from java.lang.Thread */
  enum Priority {
    kMinPriority = 1,
    kNormPriority = 5,
    kMaxPriority = 10,
  };
  enum State {
    // These match up with JDWP values.
    kTerminated   = 0,  // TERMINATED
    kRunnable     = 1,  // RUNNABLE or running now
    kTimedWaiting = 2,  // TIMED_WAITING in Object.wait()
    kBlocked      = 3,  // BLOCKED on a monitor
    kWaiting      = 4,  // WAITING in Object.wait()
    // Non-JDWP states.
    kInitializing = 5,  // allocated, not yet running --- TODO: unnecessary?
    kStarting     = 6,  // native thread started, not yet ready to run managed code
    kNative       = 7,  // off in a JNI native method
    kVmWait       = 8,  // waiting on a VM resource
    kSuspended    = 9,  // suspended, usually by GC or debugger
  };

  // Space to throw a StackOverflowError in.
  static const size_t kStackOverflowReservedBytes = 4 * KB;

  static const size_t kDefaultStackSize = 64 * KB;

  // Runtime support function pointers
  void (*pDebugMe)(Method*, uint32_t);
  void* (*pMemcpy)(void*, const void*, size_t);
  uint64_t (*pShlLong)(uint64_t, uint32_t);
  uint64_t (*pShrLong)(uint64_t, uint32_t);
  uint64_t (*pUshrLong)(uint64_t, uint32_t);
  float (*pI2f)(int);
  int (*pF2iz)(float);
  float (*pD2f)(double);
  double (*pF2d)(float);
  double (*pI2d)(int);
  int (*pD2iz)(double);
  float (*pL2f)(long);
  double (*pL2d)(long);
  long long (*pF2l)(float);
  long long (*pD2l)(double);
  float (*pFadd)(float, float);
  float (*pFsub)(float, float);
  float (*pFdiv)(float, float);
  float (*pFmul)(float, float);
  float (*pFmodf)(float, float);
  double (*pDadd)(double, double);
  double (*pDsub)(double, double);
  double (*pDdiv)(double, double);
  double (*pDmul)(double, double);
  double (*pFmod)(double, double);
  int (*pIdivmod)(int, int);
  int (*pIdiv)(int, int);
  long long (*pLmul)(long long, long long);
  long long (*pLdivmod)(long long, long long);
  void (*pCheckSuspendFromCode)(Thread*);  // Stub that is called when the suspend count is non-zero
  void (*pTestSuspendFromCode)();  // Stub that is periodically called to test the suspend count
  void* (*pAllocObjectFromCode)(uint32_t, void*);
  void* (*pAllocArrayFromCode)(uint32_t, void*, int32_t);
  void (*pCanPutArrayElementFromCode)(void*, void*);
  void* (*pCheckAndAllocArrayFromCode)(uint32_t, void*, int32_t);
  void (*pCheckCastFromCode)(void*, void*);
  Object* (*pDecodeJObjectInThread)(Thread* thread, jobject obj);
  void (*pDeliverException)(void*);
  void* (*pFindInstanceFieldFromCode)(uint32_t, void*);
  Method* (*pFindInterfaceMethodInCache)(Class*, uint32_t, const Method*, struct DvmDex*);
  void* (*pFindNativeMethod)(Thread* thread);
  int32_t (*pGet32Static)(uint32_t, void*);
  int64_t (*pGet64Static)(uint32_t, void*);
  void* (*pGetObjStatic)(uint32_t, void*);
  void (*pHandleFillArrayDataFromCode)(void*, void*);
  void* (*pInitializeStaticStorage)(uint32_t, void*);
  uint32_t (*pInstanceofNonTrivialFromCode)(const Class*, const Class*);
  void (*pInvokeInterfaceTrampoline)(uint32_t, void*);
  Class* (*pInitializeTypeFromCode)(uint32_t, Method*);
  void (*pLockObjectFromCode)(void*);
  void (*pObjectInit)(Object*);
  void (*pResolveMethodFromCode)(Method*, uint32_t);
  void* (*pResolveStringFromCode)(void*, uint32_t);
  int (*pSet32Static)(uint32_t, void*, int32_t);
  int (*pSet64Static)(uint32_t, void*, int64_t);
  int (*pSetObjStatic)(uint32_t, void*, void*);
  void (*pThrowStackOverflowFromCode)(void*);
  void (*pThrowNullPointerFromCode)();
  void (*pThrowArrayBoundsFromCode)(int32_t, int32_t);
  void (*pThrowDivZeroFromCode)();
  void (*pThrowVerificationErrorFromCode)(int32_t, int32_t);
  void (*pThrowNegArraySizeFromCode)(int32_t);
  void (*pThrowNoSuchMethodFromCode)(int32_t);
  void (*pThrowAbstractMethodErrorFromCode)(Method* method, Thread* thread, Method** sp);
  void (*pUnlockObjectFromCode)(void*);
  void* (*pUnresolvedDirectMethodTrampolineFromCode)(int32_t, void*, Thread*,
                                                     Runtime::TrampolineType);

buzbeec143c552011-08-20 17:38:58 -0700157
Shih-wei Liao9b576b42011-08-29 01:45:07 -0700158 class StackVisitor {
159 public:
Ian Rogers0cfe1fb2011-08-26 03:29:44 -0700160 virtual ~StackVisitor() {}
Ian Rogersbdb03912011-09-14 00:55:44 -0700161 virtual void VisitFrame(const Frame& frame, uintptr_t pc) = 0;
Shih-wei Liao9b576b42011-08-29 01:45:07 -0700162 };
163
Carl Shapiro61e019d2011-07-14 16:53:09 -0700164 // Creates a new thread.
Elliott Hughesd369bb72011-09-12 14:41:14 -0700165 static void Create(Object* peer, size_t stack_size);
Carl Shapiro61e019d2011-07-14 16:53:09 -0700166
167 // Creates a new thread from the calling thread.
Elliott Hughesdcc24742011-09-07 14:02:44 -0700168 static Thread* Attach(const Runtime* runtime, const char* name, bool as_daemon);
Carl Shapirob5573532011-07-12 18:22:59 -0700169
  static Thread* Current() {
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }

  static Thread* FromManagedThread(JNIEnv* env, jobject thread);
  static uint32_t LockOwnerFromThreadLock(Object* thread_lock);

  void Dump(std::ostream& os) const;

  State GetState() const {
    return state_;
  }

  State SetState(State new_state);

  bool IsDaemon();

  void WaitUntilSuspended();

  bool HoldsLock(Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

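  // A sketch of the mapping described above (the actual table lives in the
  // platform-specific .cc file; the nice values below are illustrative assumptions):
  //   static const int kNiceValues[10] = { 19, 16, 13, 10, 0, -2, -4, -5, -6, -8 };
  //   setpriority(PRIO_PROCESS, tid, kNiceValues[newPriority - 1]);
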
  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  bool CanAccessDirectReferences() const {
    // TODO: when we have a moving collector, we'll need: return state_ == kRunnable;
    return true;
  }

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  pthread_t GetImpl() const {
    return pthread_;
  }

  Object* GetPeer() const {
    return peer_;
  }

  RuntimeStats* GetStats() {
    return &stats_;
  }

  // Returns the Method* for the current method.
  // This is used by the JNI implementation for logging and diagnostic purposes.
  const Method* GetCurrentMethod() const {
    return top_of_managed_stack_.GetMethod();
  }

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  Throwable* GetException() const {
    DCHECK(CanAccessDirectReferences());
    return exception_;
  }

  void SetException(Throwable* new_exception) {
    DCHECK(CanAccessDirectReferences());
    CHECK(new_exception != NULL);
    // TODO: CHECK(exception_ == NULL);
    exception_ = new_exception;  // TODO
  }

  void ClearException() {
    exception_ = NULL;
  }

  // Finds the catch block and performs a long jump to the appropriate exception handler.
  void DeliverException();

  Context* GetLongJumpContext();

  Frame GetTopOfStack() const {
    return top_of_managed_stack_;
  }

  // TODO: this is here for testing, remove when we have exception unit tests
  // that use the real stack
  void SetTopOfStack(void* stack, uintptr_t pc) {
    top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(stack));
    top_of_managed_stack_pc_ = pc;
  }

  void SetTopOfStackPC(uintptr_t pc) {
    top_of_managed_stack_pc_ = pc;
  }

  // 'msg' may be NULL.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__ ((format(printf, 3, 4)));

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap);

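  // A usage sketch for the ThrowNew* family (the descriptor form is an assumption
  // based on the parameter name, not taken from this file):
  //   Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
  //                                         "length=%d; index=%d", length, index);
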
  // This exception is special, because we need to pre-allocate an instance.
  void ThrowOutOfMemoryError(Class* c, size_t byte_count);

  Frame FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const Method* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  void SetName(const char* name);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Number of references allocated in SIRTs on this thread
  size_t NumSirtReferences();

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj);

  void SirtVisitRoots(Heap::RootVisitor* visitor, void* arg);

  // Converts a jobject into an Object*
  Object* DecodeJObject(jobject obj);

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() {
    MutexLock mu(*wait_mutex_);
    bool interrupted = interrupted_;
    interrupted_ = false;
    return interrupted;
  }

  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() {
    MutexLock mu(*wait_mutex_);
    return interrupted_;
  }

  void Interrupt() {
    MutexLock mu(*wait_mutex_);
    if (interrupted_) {
      return;
    }
    interrupted_ = true;
    NotifyLocked();
  }

  void Notify() {
    MutexLock mu(*wait_mutex_);
    NotifyLocked();
  }

  // Linked list recording transitions from native to managed code
  void PushNativeToManagedRecord(NativeToManagedRecord* record) {
    record->last_top_of_managed_stack_ = reinterpret_cast<void*>(top_of_managed_stack_.GetSP());
    record->last_top_of_managed_stack_pc_ = top_of_managed_stack_pc_;
    record->link_ = native_to_managed_record_;
    native_to_managed_record_ = record;
    top_of_managed_stack_.SetSP(NULL);
  }

  void PopNativeToManagedRecord(const NativeToManagedRecord& record) {
    native_to_managed_record_ = record.link_;
    top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(record.last_top_of_managed_stack_));
    top_of_managed_stack_pc_ = record.last_top_of_managed_stack_pc_;
  }

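  // A sketch of the intended push/pop bracketing (assuming a stack-allocated
  // record, as the comment above suggests; the surrounding call is illustrative):
  //   NativeToManagedRecord record;
  //   thread->PushNativeToManagedRecord(&record);  // entering managed code from native
  //   ... invoke managed code ...
  //   thread->PopNativeToManagedRecord(record);    // back in native code
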
  const ClassLoader* GetClassLoaderOverride() {
    // TODO: need to place the class_loader_override_ in a handle
    // DCHECK(CanAccessDirectReferences());
    return class_loader_override_;
  }

  void SetClassLoaderOverride(const ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Creates the internal representation of a stack trace, which is more time-
  // and space-efficient to compute than a StackTraceElement[].
  jobject CreateInternalStackTrace(JNIEnv* env) const;

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);

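  // A sketch of how the two calls above are expected to pair up (illustrative only):
  //   jobject internal = thread->CreateInternalStackTrace(env);
  //   jobjectArray elements =
  //       Thread::InternalStackTraceToStackTraceElementArray(env, internal);
  //   // 'elements' is a StackTraceElement[] suitable for java.lang.Throwable.
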
  void VisitRoots(Heap::RootVisitor* visitor, void* arg);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset SuspendCountOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_));
  }

  static ThreadOffset StateOffset() {
    return ThreadOffset(OFFSETOF_VOLATILE_MEMBER(Thread, state_));
  }

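  // A sketch of how a code generator might consume these offsets (assuming the
  // Offset base class in offsets.h exposes Int32Value(); illustrative only):
  //   int32_t state_offset = Thread::StateOffset().Int32Value();
  //   // emit a load of [thread_register + state_offset] to inspect the State.
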
  // Size of stack less any space reserved for stack overflow
  size_t GetStackSize() {
    return stack_size_ - (stack_end_ - stack_base_);
  }

  // Sets the stack end to the value to be used while handling a stack overflow.
  void SetStackEndForStackOverflow() {
    // During stack overflow we allow use of the full stack
    CHECK(stack_end_ != stack_base_) << "Need to increase: kStackOverflowReservedBytes ("
                                     << kStackOverflowReservedBytes << ")";
    stack_end_ = stack_base_;
  }

  // Sets the stack end to the value to be used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_base_ + kStackOverflowReservedBytes;
  }

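  // A sketch of the expected overflow sequence (an assumption drawn from the
  // comments above, not code from this file):
  //   self->SetStackEndForStackOverflow();  // temporarily allow use of the reserved region
  //   ... build and deliver the StackOverflowError ...
  //   self->ResetDefaultStackEnd();         // restore the reserved region afterwards
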
  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_) +
                        OFFSETOF_MEMBER(Frame, sp_));
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_pc_));
  }

  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  void WalkStack(StackVisitor* visitor) const;

 private:
  Thread();
  ~Thread();
  friend class ThreadList;  // For ~Thread.

  void CreatePeer(const char* name, bool as_daemon);
  friend class Runtime;  // For CreatePeer.

  void DumpState(std::ostream& os) const;
  void DumpStack(std::ostream& os) const;

  void Attach(const Runtime* runtime);
  static void* CreateCallback(void* arg);

  void InitCpu();
  void InitFunctionPointers();
  void InitStackHwm();

  void NotifyLocked() {
    if (wait_monitor_ != NULL) {
      wait_cond_->Signal();
    }
  }

  static void ThreadExitCallback(void* arg);

  void WalkStackUntilUpCall(StackVisitor* visitor, bool include_upcall) const;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  // Native thread handle.
  pthread_t pthread_;

  // Our managed peer (an instance of java.lang.Thread).
  Object* peer_;

  // The top_of_managed_stack_ and top_of_managed_stack_pc_ fields are accessed from
  // compiled code, so we keep them early in the structure to (a) avoid having to keep
  // fixing the assembler offsets and (b) improve the chances that these will still be aligned.

  // Top of the managed stack, written out prior to the state transition from
  // kRunnable to kNative. Uses include giving the starting point for scanning
  // a managed stack when a thread is in native code.
  Frame top_of_managed_stack_;
  // PC corresponding to the call out of the top_of_managed_stack_ frame
  uintptr_t top_of_managed_stack_pc_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_;
  ConditionVariable* wait_cond_;
  // Pointer to the monitor lock we're currently waiting on (or NULL), guarded by wait_mutex_.
  Monitor* wait_monitor_;
  // Thread "interrupted" status; stays raised until queried or thrown, guarded by wait_mutex_.
  uint32_t interrupted_;
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  Object* monitor_enter_object_;

  friend class Monitor;

  RuntimeStats stats_;

  // FIXME: placeholder for the gc cardTable
  uint32_t card_table_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // Size of the stack
  size_t stack_size_;

  // The "lowest addressable byte" of the stack
  byte* stack_base_;

  // A linked list (of stack allocated records) recording transitions from
  // native to managed code.
  NativeToManagedRecord* native_to_managed_record_;

  // Top of linked list of stack indirect reference tables or NULL for none
  StackIndirectReferenceTable* top_sirt_;

  // Every thread may have an associated JNI environment
  JNIEnvExt* jni_env_;

  volatile State state_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  Runtime* runtime_;

  // The pending exception or NULL.
  Throwable* exception_;

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  const ClassLoader* class_loader_override_;

  // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // A boolean telling us whether we're recursively throwing OOME.
  uint32_t throwing_OutOfMemoryError_;

  Throwable* pre_allocated_OutOfMemoryError_;

  // TLS key used to retrieve the VM thread object.
  static pthread_key_t pthread_key_self_;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const Thread::State& state);

class ScopedThreadStateChange {
 public:
  ScopedThreadStateChange(Thread* thread, Thread::State new_state) : thread_(thread) {
    old_thread_state_ = thread_->SetState(new_state);
  }

  ~ScopedThreadStateChange() {
    thread_->SetState(old_thread_state_);
  }

 private:
  Thread* thread_;
  Thread::State old_thread_state_;
  DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
};

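// A usage sketch for ScopedThreadStateChange (illustrative; the blocking body is
// an assumption, not code from this file):
//   {
//     ScopedThreadStateChange tsc(Thread::Current(), Thread::kVmWait);
//     // ... block on a VM resource; the destructor restores the previous state.
//   }
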
}  // namespace art

#endif  // ART_SRC_THREAD_H_