// Copyright 2011 Google Inc. All Rights Reserved.

#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_

#include <pthread.h>
#include <list>

#include "globals.h"
#include "logging.h"
#include "macros.h"
#include "mem_map.h"
#include "offsets.h"
#include "runtime.h"

#include "jni.h"

namespace art {

class Method;
class Object;
class Runtime;
class StackHandleBlock;
class Thread;
class ThreadList;

class Mutex {
 public:
  virtual ~Mutex() {}

  void Lock();

  bool TryLock();

  void Unlock();

  const char* GetName() { return name_; }

  Thread* GetOwner() { return owner_; }

  static Mutex* Create(const char* name);

 public:  // TODO: protected
  void SetOwner(Thread* thread) { owner_ = thread; }

 private:
  explicit Mutex(const char* name) : name_(name), owner_(NULL) {}

  const char* name_;

  Thread* owner_;

  pthread_mutex_t lock_impl_;

  DISALLOW_COPY_AND_ASSIGN(Mutex);
};

class MutexLock {
 public:
  explicit MutexLock(Mutex* mu) : mu_(mu) {
    mu_->Lock();
  }
  ~MutexLock() { mu_->Unlock(); }
 private:
  Mutex* const mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};

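// Illustrative usage sketch (not part of this header's API; the mutex name is
// arbitrary): MutexLock is a scoped guard, so the mutex is released
// automatically when the guard goes out of scope:
//
//   Mutex* mu = Mutex::Create("example-lock");
//   {
//     MutexLock lock(mu);
//     // ... access state guarded by mu ...
//   }  // lock's destructor calls mu->Unlock() here.
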
// Stack handle blocks are allocated within the bridge frame between managed
// and native code.
class StackHandleBlock {
 public:
  // Number of references contained within this SHB
  size_t NumberOfReferences() {
    return number_of_references_;
  }

  // Link to previous SHB or NULL
  StackHandleBlock* Link() {
    return link_;
  }

  Object** Handles() {
    return handles_;
  }

  // Offset of length within SHB, used by generated code
  static size_t NumberOfReferencesOffset() {
    return OFFSETOF_MEMBER(StackHandleBlock, number_of_references_);
  }

  // Offset of link within SHB, used by generated code
  static size_t LinkOffset() {
    return OFFSETOF_MEMBER(StackHandleBlock, link_);
  }

 private:
  StackHandleBlock() {}

  size_t number_of_references_;
  StackHandleBlock* link_;

  // Fake array, really allocated and filled in by jni_compiler.
  Object* handles_[0];

  DISALLOW_COPY_AND_ASSIGN(StackHandleBlock);
};

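// Illustrative sketch (an assumption about intended use, not code emitted by
// the JNI compiler): given a thread's top SHB, the chain can be walked to
// visit every handle created since entering native code:
//
//   for (StackHandleBlock* shb = top_shb; shb != NULL; shb = shb->Link()) {
//     for (size_t i = 0; i < shb->NumberOfReferences(); ++i) {
//       Object* ref = shb->Handles()[i];
//       // ... visit ref, e.g. as part of a root scan ...
//     }
//   }
//
// Here top_shb stands for the thread's current top block (Thread::top_shb_).
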
struct NativeToManagedRecord {
  NativeToManagedRecord* link;
  void* last_top_of_managed_stack;
};

class Thread {
 public:
  enum State {
    kUnknown = -1,
    kNew,
    kRunnable,
    kBlocked,
    kWaiting,
    kTimedWaiting,
    kNative,
    kTerminated,
  };

  static const size_t kDefaultStackSize = 64 * KB;

  // TODO: needs to be redone properly, just hacked into place for now.
  // Runtime support function pointers
  void* (*pMemcpy)(void*, const void*, size_t);
  float (*pI2f)(int);
  int (*pF2iz)(float);
  float (*pD2f)(double);
  double (*pF2d)(float);
  double (*pI2d)(int);
  int (*pD2iz)(double);
  float (*pL2f)(long);
  double (*pL2d)(long);
  long long (*pArtF2l)(float);
  long long (*pArtD2l)(double);
  float (*pFadd)(float, float);
  float (*pFsub)(float, float);
  float (*pFdiv)(float, float);
  float (*pFmul)(float, float);
  float (*pFmodf)(float, float);
  double (*pDadd)(double, double);
  double (*pDsub)(double, double);
  double (*pDdiv)(double, double);
  double (*pDmul)(double, double);
  double (*pFmod)(double, double);
  int (*pIdivmod)(int, int);
  int (*pIdiv)(int, int);
  long long (*pLdivmod)(long long, long long);
  bool (*pArtUnlockObject)(struct Thread*, struct Object*);
  bool (*pArtCanPutArrayElementNoThrow)(const struct ClassObject*,
                                        const struct ClassObject*);
  int (*pArtInstanceofNonTrivialNoThrow)(const struct ClassObject*,
                                         const struct ClassObject*);
  int (*pArtInstanceofNonTrivial)(const struct ClassObject*,
                                  const struct ClassObject*);
  struct ArrayObject* (*pArtAllocArrayByClass)(struct ClassObject*,
                                               size_t, int);
  struct Method* (*pArtFindInterfaceMethodInCache)(ClassObject*, uint32_t,
                                                   const struct Method*,
                                                   struct DvmDex*);
  bool (*pArtUnlockObjectNoThrow)(struct Thread*, struct Object*);
  void (*pArtLockObjectNoThrow)(struct Thread*, struct Object*);
  struct Object* (*pArtAllocObjectNoThrow)(struct ClassObject*, int);
  void (*pArtThrowException)(struct Thread*, struct Object*);
  bool (*pArtHandleFillArrayDataNoThrow)(struct ArrayObject*, const uint16_t*);

  // Creates a new thread.
  static Thread* Create(const Runtime* runtime);

  // Creates a Thread object for, and attaches, the calling native thread.
  static Thread* Attach(const Runtime* runtime);

  static Thread* Current() {
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }

  uint32_t GetId() const {
    return id_;
  }

  pid_t GetNativeId() const {
    return native_id_;
  }

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  Object* GetException() const {
    return exception_;
  }

  void SetException(Object* new_exception) {
    CHECK(new_exception != NULL);
    // TODO: CHECK(exception_ == NULL);
    exception_ = new_exception;  // TODO
  }

  void ThrowNewException(const char* exception_class_name, const char* fmt, ...)
      __attribute__ ((format(printf, 3, 4)));

  void ClearException() {
    exception_ = NULL;
  }

  // Offset of exception within Thread, used by generated code
  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  // Offset of id within Thread, used by generated code
  static ThreadOffset IdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, id_));
  }

  // Offset of card_table within Thread, used by generated code
  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  void SetName(const char* name);

  void Suspend();

  bool IsSuspended();

  void Resume();

  static bool Init();

  Runtime* GetRuntime() const {
    return runtime_;
  }

  State GetState() const {
    return state_;
  }

  void SetState(State new_state) {
    state_ = new_state;
  }

  // Offset of suspend_count_ within Thread, used by generated code
  static ThreadOffset SuspendCountOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_));
  }

  // Offset of state within Thread, used by generated code
  static ThreadOffset StateOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, state_));
  }

  // JNI methods
  JNIEnv* GetJniEnv() const {
    return jni_env_;
  }

  // Offset of JNI environment within Thread, used by generated code
  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  // Offset of top of managed stack address, used by generated code
  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_));
  }

  // Offset of top stack handle block within Thread, used by generated code
  static ThreadOffset TopShbOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_shb_));
  }

  // Number of references allocated in StackHandleBlocks on this thread
  size_t NumShbHandles();

  // Is the given obj in this thread's stack handle blocks?
  bool ShbContains(jobject obj);

  // Offset of exception_entry_point_ within Thread, used by generated code
  static ThreadOffset ExceptionEntryPointOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_entry_point_));
  }

  void RegisterExceptionEntryPoint(void (*handler)(Method**)) {
    exception_entry_point_ = handler;
  }

  // Offset of suspend_count_entry_point_ within Thread, used by generated code
  static ThreadOffset SuspendCountEntryPointOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_entry_point_));
  }

  void RegisterSuspendCountEntryPoint(void (*handler)(Method**)) {
    suspend_count_entry_point_ = handler;
  }

  // Increasing the suspend count will cause the thread to run to a safepoint.
  void IncrementSuspendCount() { suspend_count_++; }
  void DecrementSuspendCount() { suspend_count_--; }

  // Linked list recording transitions from native to managed code
  void PushNativeToManagedRecord(NativeToManagedRecord* record) {
    record->last_top_of_managed_stack = top_of_managed_stack_;
    record->link = native_to_managed_record_;
    native_to_managed_record_ = record;
    top_of_managed_stack_ = NULL;
  }
  void PopNativeToManagedRecord(const NativeToManagedRecord& record) {
    native_to_managed_record_ = record.link;
    top_of_managed_stack_ = record.last_top_of_managed_stack;
  }

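  // Illustrative sketch (assumed calling pattern, not prescribed by this
  // header): a native-to-managed transition stub keeps the record in its own
  // stack frame and brackets the call into managed code with push/pop:
  //
  //   NativeToManagedRecord record;
  //   Thread::Current()->PushNativeToManagedRecord(&record);
  //   // ... run managed code ...
  //   Thread::Current()->PopNativeToManagedRecord(record);
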
  Object* GetClassLoaderOverride() {
    return class_loader_override_;
  }

  void SetClassLoaderOverride(Object* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

 private:
  Thread()
      : id_(1234),
        top_of_managed_stack_(NULL),
        native_to_managed_record_(NULL),
        top_shb_(NULL),
        jni_env_(NULL),
        exception_(NULL),
        suspend_count_(0),
        class_loader_override_(NULL) {
  }

  ~Thread() {
    delete jni_env_;
  }

  void InitCpu();

  // Managed thread id.
  uint32_t id_;

  // FIXME: placeholder for the gc cardTable
  uint32_t card_table_;

  // Top of the managed stack, written out prior to the state transition from
  // kRunnable to kNative. Uses include giving the starting point for scanning
  // a managed stack when a thread is in native code.
  void* top_of_managed_stack_;

  // A linked list (of stack allocated records) recording transitions from
  // native to managed code.
  NativeToManagedRecord* native_to_managed_record_;

  // Top of linked list of stack handle blocks or NULL for none
  StackHandleBlock* top_shb_;

  // Every thread may have an associated JNI environment
  JNIEnv* jni_env_;

  State state_;

  // Native (kernel) thread id.
  pid_t native_id_;

  // Native thread handle.
  pthread_t handle_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  Runtime* runtime_;

  // The pending exception or NULL.
  Object* exception_;

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_;

  Object* class_loader_override_;

  // The memory mapping of the stack for non-attached threads.
  scoped_ptr<MemMap> stack_;

  // The inclusive base of the control stack.
  byte* stack_base_;

  // The exclusive limit of the control stack.
  byte* stack_limit_;

  // TLS key used to retrieve the VM thread object.
  static pthread_key_t pthread_key_self_;

  // Entry point called when exception_ is set
  void (*exception_entry_point_)(Method** frame);

  // Entry point called when suspend_count_ is non-zero
  void (*suspend_count_entry_point_)(Method** frame);

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const Thread::State& state);

class ThreadList {
 public:
  static const int kMaxId = 0xFFFF;
  static const int kInvalidId = 0;
  static const int kMainId = 1;

  static ThreadList* Create();

  ~ThreadList();

  void Register(Thread* thread);

  void Unregister(Thread* thread);

  void Lock() {
    lock_->Lock();
  }

  void Unlock() {
    lock_->Unlock();
  }

 private:
  ThreadList();

  std::list<Thread*> list_;

  Mutex* lock_;

  DISALLOW_COPY_AND_ASSIGN(ThreadList);
};

class ThreadListLock {
 public:
  ThreadListLock(ThreadList* thread_list, Thread* current_thread)
      : thread_list_(thread_list) {
    if (current_thread == NULL) {  // try to get it from TLS
      current_thread = Thread::Current();
    }
    Thread::State old_state;
    if (current_thread != NULL) {
      old_state = current_thread->GetState();
      current_thread->SetState(Thread::kWaiting);  // TODO: VMWAIT
    } else {
      // happens during VM shutdown
      old_state = Thread::kUnknown;  // TODO: something else
    }
    thread_list_->Lock();
    if (current_thread != NULL) {
      current_thread->SetState(old_state);
    }
  }

  ~ThreadListLock() {
    thread_list_->Unlock();
  }

 private:
  ThreadList* thread_list_;

  DISALLOW_COPY_AND_ASSIGN(ThreadListLock);
};

}  // namespace art

#endif  // ART_SRC_THREAD_H_