blob: 7609b40f1f87d6766a63c7e6c460a88cf9a34fb9 [file] [log] [blame]
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07001// Copyright 2011 Google Inc. All Rights Reserved.
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07002
3#ifndef ART_SRC_THREAD_H_
4#define ART_SRC_THREAD_H_
5
Carl Shapirob5573532011-07-12 18:22:59 -07006#include <pthread.h>
Ian Rogersb033c752011-07-20 12:22:35 -07007#include <list>
Carl Shapirob5573532011-07-12 18:22:59 -07008
Brian Carlstrom578bbdc2011-07-21 14:07:47 -07009#include "globals.h"
Elliott Hughes69f5bc62011-08-24 09:26:14 -070010#include "jni_internal.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070011#include "logging.h"
12#include "macros.h"
Brian Carlstromb765be02011-08-17 23:54:10 -070013#include "mem_map.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070014#include "offsets.h"
15#include "runtime.h"
Carl Shapiro0e5d75d2011-07-06 18:28:37 -070016
Ian Rogersb033c752011-07-20 12:22:35 -070017
Carl Shapiro0e5d75d2011-07-06 18:28:37 -070018namespace art {
19
Elliott Hughes69f5bc62011-08-24 09:26:14 -070020class Array;
Elliott Hughes37f7a402011-08-22 18:56:01 -070021class Class;
Elliott Hughesedcc09c2011-08-21 18:47:05 -070022class ClassLoader;
Elliott Hughes69f5bc62011-08-24 09:26:14 -070023class JNIEnvExt;
Brian Carlstroma40f9bc2011-07-26 21:26:07 -070024class Method;
Carl Shapiro0e5d75d2011-07-06 18:28:37 -070025class Object;
Carl Shapirob5573532011-07-12 18:22:59 -070026class Runtime;
Carl Shapiro0e5d75d2011-07-06 18:28:37 -070027class Thread;
Carl Shapirob5573532011-07-12 18:22:59 -070028class ThreadList;
Elliott Hughese5b0dc82011-08-23 09:59:02 -070029class Throwable;
Carl Shapiro0e5d75d2011-07-06 18:28:37 -070030
// A simple mutual-exclusion lock backed by a pthread mutex. Instances are
// created through the static Create() factory (the constructor is private);
// callers lock either directly via Lock()/TryLock()/Unlock() or through the
// scoped MutexLock guard.
class Mutex {
 public:
  virtual ~Mutex() {}

  // Acquires the lock, blocking until it is available.
  void Lock();

  // Attempts to acquire the lock without blocking; returns true on success.
  bool TryLock();

  // Releases the lock.
  void Unlock();

  // Name supplied at creation time. Only the pointer is stored, so the
  // string must outlive this Mutex.
  const char* GetName() { return name_; }

  // Thread recorded as the current owner, or NULL if unowned.
  Thread* GetOwner() { return owner_; }

  // Factory: allocates and initializes a new Mutex with the given name.
  static Mutex* Create(const char* name);

  // TODO: only needed because we lack a condition variable abstraction.
  pthread_mutex_t* GetImpl() { return &lock_impl_; }

 private:
  explicit Mutex(const char* name) : name_(name), owner_(NULL) {}

  // Records the owning thread; presumably called from the Lock()/Unlock()
  // implementations — TODO confirm against the .cc.
  void SetOwner(Thread* thread) { owner_ = thread; }

  // Name for debugging; not owned.
  const char* name_;

  // Current owner, or NULL.
  Thread* owner_;

  // Underlying pthread mutex.
  pthread_mutex_t lock_impl_;

  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
63
64class MutexLock {
65 public:
66 explicit MutexLock(Mutex *mu) : mu_(mu) {
67 mu_->Lock();
68 }
69 ~MutexLock() { mu_->Unlock(); }
70 private:
71 Mutex* const mu_;
72 DISALLOW_COPY_AND_ASSIGN(MutexLock);
73};
74
Ian Rogers408f79a2011-08-23 18:22:33 -070075// Stack allocated indirect reference table, allocated within the bridge frame
76// between managed and native code.
77class StackIndirectReferenceTable {
Ian Rogersb033c752011-07-20 12:22:35 -070078 public:
Ian Rogers408f79a2011-08-23 18:22:33 -070079 // Number of references contained within this SIRT
Ian Rogersb033c752011-07-20 12:22:35 -070080 size_t NumberOfReferences() {
81 return number_of_references_;
82 }
83
Ian Rogers408f79a2011-08-23 18:22:33 -070084 // Link to previous SIRT or NULL
85 StackIndirectReferenceTable* Link() {
Ian Rogersb033c752011-07-20 12:22:35 -070086 return link_;
87 }
88
Ian Rogers408f79a2011-08-23 18:22:33 -070089 Object** References() {
90 return references_;
Ian Rogersa8cd9f42011-08-19 16:43:41 -070091 }
92
Ian Rogers408f79a2011-08-23 18:22:33 -070093 // Offset of length within SIRT, used by generated code
Ian Rogersb033c752011-07-20 12:22:35 -070094 static size_t NumberOfReferencesOffset() {
Ian Rogers408f79a2011-08-23 18:22:33 -070095 return OFFSETOF_MEMBER(StackIndirectReferenceTable, number_of_references_);
Ian Rogersb033c752011-07-20 12:22:35 -070096 }
97
Ian Rogers408f79a2011-08-23 18:22:33 -070098 // Offset of link within SIRT, used by generated code
Ian Rogersb033c752011-07-20 12:22:35 -070099 static size_t LinkOffset() {
Ian Rogers408f79a2011-08-23 18:22:33 -0700100 return OFFSETOF_MEMBER(StackIndirectReferenceTable, link_);
Ian Rogersb033c752011-07-20 12:22:35 -0700101 }
102
103 private:
Ian Rogers408f79a2011-08-23 18:22:33 -0700104 StackIndirectReferenceTable() {}
Ian Rogersb033c752011-07-20 12:22:35 -0700105
106 size_t number_of_references_;
Ian Rogers408f79a2011-08-23 18:22:33 -0700107 StackIndirectReferenceTable* link_;
Ian Rogersb033c752011-07-20 12:22:35 -0700108
Ian Rogersa8cd9f42011-08-19 16:43:41 -0700109 // Fake array, really allocated and filled in by jni_compiler.
Ian Rogers408f79a2011-08-23 18:22:33 -0700110 Object* references_[0];
Ian Rogersa8cd9f42011-08-19 16:43:41 -0700111
Ian Rogers408f79a2011-08-23 18:22:33 -0700112 DISALLOW_COPY_AND_ASSIGN(StackIndirectReferenceTable);
Ian Rogersb033c752011-07-20 12:22:35 -0700113};
114
// Stack-allocated record of a transition from native back into managed code;
// pushed/popped via Thread::PushNativeToManagedRecord and
// Thread::PopNativeToManagedRecord.
struct NativeToManagedRecord {
  // Previous record in the per-thread linked list, or NULL.
  NativeToManagedRecord* link;
  // The thread's top-of-managed-stack SP saved when this record was pushed;
  // restored when it is popped.
  void* last_top_of_managed_stack;
};
119
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -0700120// Iterator over managed frames up to the first native-to-managed transition
121class Frame {
122 Frame() : sp_(NULL) {}
123
124 const Method* GetMethod() const {
125 return *sp_;
126 }
127
128 bool HasNext() const {
129 return NextMethod() != NULL;
130 }
131
132 void Next();
133
134 void* GetPC() const;
135
136 const Method** GetSP() const {
137 return sp_;
138 }
139
140 // TODO: this is here for testing, remove when we have exception unit tests
141 // that use the real stack
142 void SetSP(const Method** sp) {
143 sp_ = sp;
144 }
145
146 private:
147 const Method* NextMethod() const;
148
149 friend class Thread;
150
151 const Method** sp_;
152};
153
// Represents one runtime thread: its managed/native state, pending exception,
// JNI environment, SIRT chain, and a table of runtime-support function
// pointers. NOTE: field order matters — the *Offset() helpers below expose
// member offsets to generated code, so do not reorder members.
class Thread {
 public:
  // Lifecycle states. kUnknown is a sentinel used when no state is available.
  enum State {
    kUnknown = -1,
    kNew,
    kRunnable,
    kBlocked,
    kWaiting,
    kTimedWaiting,
    kNative,
    kTerminated,
  };


  // Default stack size for threads created by the runtime.
  static const size_t kDefaultStackSize = 64 * KB;

  // Runtime support function pointers, installed by InitFunctionPointers()
  // (called from the constructor). Kept public so compiled code can reach
  // them through the Thread pointer — TODO confirm exact consumers.
  void* (*pMemcpy)(void*, const void*, size_t);
  uint64_t (*pShlLong)(uint64_t, uint32_t);
  uint64_t (*pShrLong)(uint64_t, uint32_t);
  uint64_t (*pUshrLong)(uint64_t, uint32_t);
  float (*pI2f)(int);
  int (*pF2iz)(float);
  float (*pD2f)(double);
  double (*pF2d)(float);
  double (*pI2d)(int);
  int (*pD2iz)(double);
  float (*pL2f)(long);
  double (*pL2d)(long);
  long long (*pArtF2l)(float);
  long long (*pArtD2l)(double);
  float (*pFadd)(float, float);
  float (*pFsub)(float, float);
  float (*pFdiv)(float, float);
  float (*pFmul)(float, float);
  float (*pFmodf)(float, float);
  double (*pDadd)(double, double);
  double (*pDsub)(double, double);
  double (*pDdiv)(double, double);
  double (*pDmul)(double, double);
  double (*pFmod)(double, double);
  int (*pIdivmod)(int, int);
  int (*pIdiv)(int, int);
  long long (*pLdivmod)(long long, long long);
  bool (*pArtUnlockObject)(struct Thread*, struct Object*);
  bool (*pArtCanPutArrayElementNoThrow)(const struct ClassObject*,
                                        const struct ClassObject*);
  int (*pArtInstanceofNonTrivialNoThrow) (const struct ClassObject*,
                                          const struct ClassObject*);
  int (*pArtInstanceofNonTrivial) (const struct ClassObject*, const struct ClassObject*);
  Array* (*pArtAllocArrayByClass)(Class*, size_t);
  struct Method* (*pArtFindInterfaceMethodInCache)(ClassObject*, uint32_t,
                                                   const struct Method*, struct DvmDex*);
  bool (*pArtUnlockObjectNoThrow)(struct Thread*, struct Object*);
  void (*pArtLockObjectNoThrow)(struct Thread*, struct Object*);
  struct Object* (*pArtAllocObjectNoThrow)(struct ClassObject*, int);
  void (*pArtThrowException)(struct Thread*, struct Object*);
  bool (*pArtHandleFillArrayDataNoThrow)(struct ArrayObject*, const uint16_t*);

  // Creates a new thread.
  static Thread* Create(const Runtime* runtime);

  // Creates a new thread from the calling thread.
  static Thread* Attach(const Runtime* runtime);

  // The Thread for the calling thread, fetched from thread-local storage.
  // Returns NULL if the key has not been populated for this thread.
  static Thread* Current() {
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }

  // Managed thread id.
  uint32_t GetId() const {
    return id_;
  }

  // Native (kernel) thread id.
  pid_t GetNativeId() const {
    return native_id_;
  }

  // True if an exception has been set and not yet cleared.
  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  // The pending exception, or NULL.
  Throwable* GetException() const {
    return exception_;
  }

  // Copy of the top-of-managed-stack frame iterator.
  Frame GetTopOfStack() const {
    return top_of_managed_stack_;
  }

  // TODO: this is here for testing, remove when we have exception unit tests
  // that use the real stack
  void SetTopOfStack(void* stack) {
    top_of_managed_stack_.SetSP(reinterpret_cast<const Method**>(stack));
  }

  // Sets the pending exception; new_exception must be non-NULL. Currently
  // overwrites any previously pending exception (see TODO).
  void SetException(Throwable* new_exception) {
    CHECK(new_exception != NULL);
    // TODO: CHECK(exception_ == NULL);
    exception_ = new_exception;  // TODO
  }

  // Constructs and sets a pending exception of the named class with a
  // printf-style detail message.
  void ThrowNewException(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__ ((format(printf, 3, 4)));

  // This exception is special, because we need to pre-allocate an instance.
  void ThrowOutOfMemoryError();

  // Clears the pending exception.
  void ClearException() {
    exception_ = NULL;
  }

  // Finds a handler frame for an exception thrown at throw_pc; handler_pc
  // receives the handler address — TODO confirm exact contract in the .cc.
  Frame FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const Method* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  // Offset of exception within Thread, used by generated code
  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  // Offset of id within Thread, used by generated code
  static ThreadOffset IdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, id_));
  }

  // Offset of card_table within Thread, used by generated code
  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  void SetName(const char* name);

  void Suspend();

  bool IsSuspended();

  void Resume();

  // One-time process-wide initialization (e.g. the TLS key) — TODO confirm.
  static bool Init();

  State GetState() const {
    return state_;
  }

  void SetState(State new_state) {
    state_ = new_state;
  }

  static ThreadOffset SuspendCountOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_));
  }

  // Offset of state within Thread, used by generated code
  static ThreadOffset StateOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, state_));
  }

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Offset of JNI environment within Thread, used by generated code
  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  // Offset of top of managed stack address, used by generated code
  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_) +
                        OFFSETOF_MEMBER(Frame, sp_));
  }

  // Offset of top stack indirect reference table within Thread, used by
  // generated code
  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  // Number of references allocated in SIRTs on this thread
  size_t NumSirtReferences();

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj);

  // Convert a jobject into a Object*
  Object* DecodeJObject(jobject obj);

  // Offset of exception_entry_point_ within Thread, used by generated code
  static ThreadOffset ExceptionEntryPointOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_entry_point_));
  }

  void RegisterExceptionEntryPoint(void (*handler)(Method**)) {
    exception_entry_point_ = handler;
  }

  // Offset of suspend_count_entry_point_ within Thread, used by generated code
  static ThreadOffset SuspendCountEntryPointOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_entry_point_));
  }

  void RegisterSuspendCountEntryPoint(void (*handler)(Method**)) {
    suspend_count_entry_point_ = handler;
  }

  // Increasing the suspend count, will cause the thread to run to safepoint
  // NOTE(review): plain int increment/decrement — no synchronization visible
  // here; confirm callers hold a lock or accept the race.
  void IncrementSuspendCount() { suspend_count_++; }
  void DecrementSuspendCount() { suspend_count_--; }

  // Linked list recording transitions from native to managed code
  // Push saves the current managed SP into the record and clears it; Pop
  // restores the saved SP.
  void PushNativeToManagedRecord(NativeToManagedRecord* record) {
    record->last_top_of_managed_stack = reinterpret_cast<void*>(top_of_managed_stack_.GetSP());
    record->link = native_to_managed_record_;
    native_to_managed_record_ = record;
    top_of_managed_stack_.SetSP(NULL);
  }
  void PopNativeToManagedRecord(const NativeToManagedRecord& record) {
    native_to_managed_record_ = record.link;
    top_of_managed_stack_.SetSP( reinterpret_cast<const Method**>(record.last_top_of_managed_stack) );
  }

  const ClassLoader* GetClassLoaderOverride() {
    return class_loader_override_;
  }

  void SetClassLoaderOverride(const ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

 private:
  // NOTE: id_ is hard-coded to 1234 here; real id assignment presumably
  // happens in Create()/Attach() — TODO confirm.
  Thread()
      : id_(1234),
        top_of_managed_stack_(),
        native_to_managed_record_(NULL),
        top_sirt_(NULL),
        jni_env_(NULL),
        exception_(NULL),
        suspend_count_(0),
        class_loader_override_(NULL) {
    InitFunctionPointers();
  }

  // Private: Threads are destroyed by the runtime, not by callers.
  ~Thread() {
    delete jni_env_;
  }

  void InitCpu();
  void InitFunctionPointers();

  // Managed thread id.
  uint32_t id_;

  // FIXME: placeholder for the gc cardTable
  uint32_t card_table_;

  // Top of the managed stack, written out prior to the state transition from
  // kRunnable to kNative. Uses include to give the starting point for scanning
  // a managed stack when a thread is in native code.
  Frame top_of_managed_stack_;

  // A linked list (of stack allocated records) recording transitions from
  // native to managed code.
  NativeToManagedRecord* native_to_managed_record_;

  // Top of linked list of stack indirect reference tables or NULL for none
  StackIndirectReferenceTable* top_sirt_;

  // Every thread may have an associated JNI environment
  JNIEnvExt* jni_env_;

  // Current state; see the State enum above.
  State state_;

  // Native (kernel) thread id.
  pid_t native_id_;

  // Native thread handle.
  pthread_t handle_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  // The runtime this thread belongs to; set outside this header — TODO confirm.
  Runtime* runtime_;

  // The pending exception or NULL.
  Throwable* exception_;

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  const ClassLoader* class_loader_override_;

  // The memory mapping of the stack for non-attached threads.
  scoped_ptr<MemMap> stack_;

  // The inclusive base of the control stack.
  byte* stack_base_;

  // The exclusive limit of the control stack.
  byte* stack_limit_;

  // TLS key used to retrieve the VM thread object.
  static pthread_key_t pthread_key_self_;

  // Entry point called when exception_ is set
  void (*exception_entry_point_)(Method** frame);

  // Entry point called when suspend_count_ is non-zero
  void (*suspend_count_entry_point_)(Method** frame);

  DISALLOW_COPY_AND_ASSIGN(Thread);
};
Elliott Hughes330304d2011-08-12 14:28:05 -0700475std::ostream& operator<<(std::ostream& os, const Thread& thread);
Ian Rogersb033c752011-07-20 12:22:35 -0700476std::ostream& operator<<(std::ostream& os, const Thread::State& state);
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700477
Carl Shapirob5573532011-07-12 18:22:59 -0700478class ThreadList {
479 public:
Carl Shapiro61e019d2011-07-14 16:53:09 -0700480 static const int kMaxId = 0xFFFF;
481 static const int kInvalidId = 0;
482 static const int kMainId = 1;
Carl Shapirob5573532011-07-12 18:22:59 -0700483
Carl Shapiro61e019d2011-07-14 16:53:09 -0700484 static ThreadList* Create();
485
486 ~ThreadList();
Carl Shapirob5573532011-07-12 18:22:59 -0700487
488 void Register(Thread* thread);
489
490 void Unregister(Thread* thread);
491
Carl Shapirob5573532011-07-12 18:22:59 -0700492 void Lock() {
493 lock_->Lock();
494 }
495
496 void Unlock() {
497 lock_->Unlock();
498 };
499
500 private:
501 ThreadList();
502
503 std::list<Thread*> list_;
504
505 Mutex* lock_;
506
507 DISALLOW_COPY_AND_ASSIGN(ThreadList);
508};
509
510class ThreadListLock {
511 public:
512 ThreadListLock(ThreadList* thread_list, Thread* current_thread)
513 : thread_list_(thread_list) {
514 if (current_thread == NULL) { // try to get it from TLS
515 current_thread = Thread::Current();
516 }
517 Thread::State old_state;
518 if (current_thread != NULL) {
519 old_state = current_thread->GetState();
520 current_thread->SetState(Thread::kWaiting); // TODO: VMWAIT
521 } else {
522 // happens during VM shutdown
523 old_state = Thread::kUnknown; // TODO: something else
524 }
525 thread_list_->Lock();
526 if (current_thread != NULL) {
527 current_thread->SetState(old_state);
528 }
529 }
530
531 ~ThreadListLock() {
532 thread_list_->Unlock();
533 }
534
Carl Shapirob5573532011-07-12 18:22:59 -0700535 private:
536 ThreadList* thread_list_;
537
538 DISALLOW_COPY_AND_ASSIGN(ThreadListLock);
539};
540
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700541} // namespace art
542
543#endif // ART_SRC_THREAD_H_