blob: c949339addde2a35cf17f36522e93646bcd753e2 [file] [log] [blame]
Elliott Hughes8d768a92011-09-14 16:35:25 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
Carl Shapiro0e5d75d2011-07-06 18:28:37 -070016
17#ifndef ART_SRC_THREAD_H_
18#define ART_SRC_THREAD_H_
19
Carl Shapirob5573532011-07-12 18:22:59 -070020#include <pthread.h>
Elliott Hughesa0957642011-09-02 14:27:33 -070021
Elliott Hughes02b48d12011-09-07 17:15:51 -070022#include <bitset>
Elliott Hughesa0957642011-09-02 14:27:33 -070023#include <iosfwd>
Ian Rogersb033c752011-07-20 12:22:35 -070024#include <list>
Elliott Hughes8daa0922011-09-11 13:46:25 -070025#include <string>
Carl Shapirob5573532011-07-12 18:22:59 -070026
Brian Carlstrom1f870082011-08-23 16:02:11 -070027#include "dex_file.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070028#include "globals.h"
Elliott Hughes69f5bc62011-08-24 09:26:14 -070029#include "jni_internal.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070030#include "logging.h"
31#include "macros.h"
Elliott Hughes8daa0922011-09-11 13:46:25 -070032#include "mutex.h"
Brian Carlstromb765be02011-08-17 23:54:10 -070033#include "mem_map.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070034#include "offsets.h"
Elliott Hughes9d5ccec2011-09-19 13:19:50 -070035#include "runtime_stats.h"
Ian Rogersbdb03912011-09-14 00:55:44 -070036#include "UniquePtr.h"
Ian Rogersb033c752011-07-20 12:22:35 -070037
Carl Shapiro0e5d75d2011-07-06 18:28:37 -070038namespace art {
39
Elliott Hughes69f5bc62011-08-24 09:26:14 -070040class Array;
Elliott Hughes37f7a402011-08-22 18:56:01 -070041class Class;
Brian Carlstrom1f870082011-08-23 16:02:11 -070042class ClassLinker;
Elliott Hughesedcc09c2011-08-21 18:47:05 -070043class ClassLoader;
Ian Rogersbdb03912011-09-14 00:55:44 -070044class Context;
Brian Carlstroma40f9bc2011-07-26 21:26:07 -070045class Method;
Elliott Hughes8daa0922011-09-11 13:46:25 -070046class Monitor;
Carl Shapiro0e5d75d2011-07-06 18:28:37 -070047class Object;
Carl Shapirob5573532011-07-12 18:22:59 -070048class Runtime;
Carl Shapiro0e5d75d2011-07-06 18:28:37 -070049class Thread;
Carl Shapirob5573532011-07-12 18:22:59 -070050class ThreadList;
Elliott Hughese5b0dc82011-08-23 09:59:02 -070051class Throwable;
Shih-wei Liao55df06b2011-08-26 14:39:27 -070052class StackTraceElement;
buzbee1da522d2011-09-04 11:22:20 -070053class StaticStorageBase;
54
Shih-wei Liao55df06b2011-08-26 14:39:27 -070055template<class T> class ObjectArray;
Shih-wei Liao44175362011-08-28 16:59:17 -070056template<class T> class PrimitiveArray;
57typedef PrimitiveArray<int32_t> IntArray;
Carl Shapiro0e5d75d2011-07-06 18:28:37 -070058
// Stack allocated indirect reference table, allocated within the bridge frame
// between managed and native code.
class StackIndirectReferenceTable {
 public:
  // Number of references contained within this SIRT
  size_t NumberOfReferences() {
    return number_of_references_;
  }

  // Link to previous SIRT or NULL
  StackIndirectReferenceTable* Link() {
    return link_;
  }

  // Pointer to the first slot of the (externally allocated) reference array.
  Object** References() {
    return references_;
  }

  // Offset of length within SIRT, used by generated code
  static size_t NumberOfReferencesOffset() {
    return OFFSETOF_MEMBER(StackIndirectReferenceTable, number_of_references_);
  }

  // Offset of link within SIRT, used by generated code
  static size_t LinkOffset() {
    return OFFSETOF_MEMBER(StackIndirectReferenceTable, link_);
  }

 private:
  // Never constructed directly in C++; instances are laid out in the bridge
  // frame by the JNI compiler (see references_ below).
  StackIndirectReferenceTable() {}

  size_t number_of_references_;
  StackIndirectReferenceTable* link_;

  // Fake array, really allocated and filled in by jni_compiler.
  // NOTE(review): zero-length array is a GCC extension; field order above is
  // relied on by the generated-code offsets — do not reorder.
  Object* references_[0];

  DISALLOW_COPY_AND_ASSIGN(StackIndirectReferenceTable);
};
98
// Stack-allocated record of a native-to-managed code transition. Filled in
// and linked/unlinked by Thread::PushNativeToManagedRecord() and
// Thread::PopNativeToManagedRecord(), which save and restore the thread's
// top_of_managed_stack_ and top_of_managed_stack_pc_ fields.
struct NativeToManagedRecord {
  NativeToManagedRecord* link_;             // Previous record in the chain, or NULL.
  void* last_top_of_managed_stack_;         // Saved Frame SP (a Method**).
  uintptr_t last_top_of_managed_stack_pc_;  // Saved PC for the saved frame.
};
104
// Iterator over managed frames up to the first native-to-managed transition
class PACKED Frame {
 public:
  Frame() : sp_(NULL) {}

  // Current method, or NULL if this frame is empty (sp_ unset).
  Method* GetMethod() const {
    return (sp_ != NULL) ? *sp_ : NULL;
  }

  // Is there a caller frame to advance to?
  bool HasNext() const {
    return NextMethod() != NULL;
  }

  // Advance to the caller's frame.
  void Next();

  // Return address stored for this frame.
  uintptr_t GetReturnPC() const;

  // Value of the num'th callee-save register spilled in this frame.
  uintptr_t LoadCalleeSave(int num) const;

  // Value of the given Dalvik virtual register within this frame.
  uintptr_t GetVReg(Method* method, int vreg) const;

  Method** GetSP() const {
    return sp_;
  }

  // TODO: this is here for testing, remove when we have exception unit tests
  // that use the real stack
  void SetSP(Method** sp) {
    sp_ = sp;
  }

  // Is this a frame for a real method (native or with dex code)
  bool HasMethod() const;

 private:
  Method* NextMethod() const;

  friend class Thread;

  // Stack pointer; points at the Method* slot of the current frame.
  // NOTE(review): its offset within Frame is used by
  // Thread::TopOfManagedStackOffset() — keep it the sole/first member.
  Method** sp_;
};
146
// Native representation of a runtime thread. PACKED because compiled code
// reads members at fixed offsets (see the ThreadOffset accessors below), so
// member order and padding must stay stable.
class PACKED Thread {
 public:
  /* thread priorities, from java.lang.Thread */
  enum Priority {
    kMinPriority = 1,
    kNormPriority = 5,
    kMaxPriority = 10,
  };
  enum State {
    // These match up with JDWP values.
    kTerminated = 0,    // TERMINATED
    kRunnable = 1,      // RUNNABLE or running now
    kTimedWaiting = 2,  // TIMED_WAITING in Object.wait()
    kBlocked = 3,       // BLOCKED on a monitor
    kWaiting = 4,       // WAITING in Object.wait()
    // Non-JDWP states.
    kInitializing = 5,  // allocated, not yet running --- TODO: unnecessary?
    kStarting = 6,      // native thread started, not yet ready to run managed code
    kNative = 7,        // off in a JNI native method
    kVmWait = 8,        // waiting on a VM resource
    kSuspended = 9,     // suspended, usually by GC or debugger
  };

  // Space to throw a StackOverflowError in.
  static const size_t kStackOverflowReservedBytes = 3 * KB;

  // Default stack size for threads we create (see Create()).
  static const size_t kDefaultStackSize = 64 * KB;

  // Runtime support function pointers
  // Installed by InitFunctionPointers(); invoked directly from compiled code.
  void (*pDebugMe)(Method*, uint32_t);
  void* (*pMemcpy)(void*, const void*, size_t);
  uint64_t (*pShlLong)(uint64_t, uint32_t);
  uint64_t (*pShrLong)(uint64_t, uint32_t);
  uint64_t (*pUshrLong)(uint64_t, uint32_t);
  float (*pI2f)(int);
  int (*pF2iz)(float);
  float (*pD2f)(double);
  double (*pF2d)(float);
  double (*pI2d)(int);
  int (*pD2iz)(double);
  float (*pL2f)(long);
  double (*pL2d)(long);
  long long (*pF2l)(float);
  long long (*pD2l)(double);
  float (*pFadd)(float, float);
  float (*pFsub)(float, float);
  float (*pFdiv)(float, float);
  float (*pFmul)(float, float);
  float (*pFmodf)(float, float);
  double (*pDadd)(double, double);
  double (*pDsub)(double, double);
  double (*pDdiv)(double, double);
  double (*pDmul)(double, double);
  double (*pFmod)(double, double);
  int (*pIdivmod)(int, int);
  int (*pIdiv)(int, int);
  long long (*pLmul)(long long, long long);
  long long (*pLdivmod)(long long, long long);
  void* (*pAllocObjectFromCode)(uint32_t, void*);
  void* (*pAllocArrayFromCode)(uint32_t, void*, int32_t);
  void* (*pCheckAndAllocArrayFromCode)(uint32_t, void*, int32_t);
  uint32_t (*pGet32Static)(uint32_t, const Method*);
  void (*pSet32Static)(uint32_t, const Method*, uint32_t);
  uint64_t (*pGet64Static)(uint32_t, const Method*);
  void (*pSet64Static)(uint32_t, const Method*, uint64_t);
  Object* (*pGetObjStatic)(uint32_t, const Method*);
  void (*pSetObjStatic)(uint32_t, const Method*, Object*);
  void (*pCanPutArrayElementFromCode)(void*, void*);
  uint32_t (*pInstanceofNonTrivialFromCode) (const Class*, const Class*);
  void (*pCheckCastFromCode) (void*, void*);
  Method* (*pFindInterfaceMethodInCache)(Class*, uint32_t, const Method*, struct DvmDex*);
  void (*pUnlockObjectFromCode)(void*, void*);
  void (*pLockObjectFromCode)(Thread*, Object*);
  void (*pDeliverException)(void*);
  void (*pHandleFillArrayDataFromCode)(void*, void*);
  Class* (*pInitializeTypeFromCode)(uint32_t, Method*);
  void (*pResolveMethodFromCode)(Method*, uint32_t);
  void (*pInvokeInterfaceTrampoline)(void*, void*, void*, void*);
  void* (*pInitializeStaticStorage)(uint32_t, void*);
  Field* (*pFindInstanceFieldFromCode)(uint32_t, const Method*);
  void (*pCheckSuspendFromCode)(Thread*);
  void (*pTestSuspendFromCode)();
  void (*pThrowStackOverflowFromCode)(void*);
  void (*pThrowNullPointerFromCode)();
  void (*pThrowArrayBoundsFromCode)(int32_t, int32_t);
  void (*pThrowDivZeroFromCode)();
  void (*pThrowVerificationErrorFromCode)(int32_t, int32_t);
  void (*pThrowNegArraySizeFromCode)(int32_t);
  void (*pThrowRuntimeExceptionFromCode)(int32_t);
  void (*pThrowInternalErrorFromCode)(int32_t);
  void (*pThrowNoSuchMethodFromCode)(int32_t);
  void (*pThrowAbstractMethodErrorFromCode)(Method* method, Thread* thread, Method** sp);
  void* (*pFindNativeMethod)(Thread* thread);
  Object* (*pDecodeJObjectInThread)(Thread* thread, jobject obj);

  // Callback interface for WalkStack(): VisitFrame is invoked once per
  // managed frame with the frame and its PC.
  class StackVisitor {
   public:
    virtual ~StackVisitor() {}
    virtual void VisitFrame(const Frame& frame, uintptr_t pc) = 0;
  };

  // Creates a new thread.
  static void Create(Object* peer, size_t stack_size);

  // Creates a new thread from the calling thread.
  static Thread* Attach(const Runtime* runtime, const char* name, bool as_daemon);

  // The Thread* for the calling thread, read from thread-local storage.
  static Thread* Current() {
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }

  // Map a managed java.lang.Thread object back to its native Thread*.
  static Thread* FromManagedThread(JNIEnv* env, jobject thread);
  // Thin lock id of the thread owning the given thread's lock object.
  static uint32_t LockOwnerFromThreadLock(Object* thread_lock);

  // Dumps state and stack (see DumpState/DumpStack) for diagnostics.
  void Dump(std::ostream& os) const;

  State GetState() const {
    return state_;
  }

  // Sets the state and returns the previous state.
  State SetState(State new_state);

  bool IsDaemon();

  // Blocks until this thread has actually suspended.
  void WaitUntilSuspended();

  // Does this thread hold the monitor lock of the given object?
  bool HoldsLock(Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  bool CanAccessDirectReferences() const {
    // TODO: when we have a moving collector, we'll need: return state_ == kRunnable;
    return true;
  }

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  pthread_t GetImpl() const {
    return pthread_;
  }

  Object* GetPeer() const {
    return peer_;
  }

  RuntimeStats* GetStats() {
    return &stats_;
  }

  // Returns the Method* for the current method.
  // This is used by the JNI implementation for logging and diagnostic purposes.
  const Method* GetCurrentMethod() const {
    return top_of_managed_stack_.GetMethod();
  }

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  Throwable* GetException() const {
    DCHECK(CanAccessDirectReferences());
    return exception_;
  }

  void SetException(Throwable* new_exception) {
    DCHECK(CanAccessDirectReferences());
    CHECK(new_exception != NULL);
    // TODO: CHECK(exception_ == NULL);
    exception_ = new_exception;  // TODO
  }

  void ClearException() {
    exception_ = NULL;
  }

  // Find catch block and perform long jump to appropriate exception handle
  void DeliverException();

  // Thread-local context for exception delivery (lazily allocated).
  Context* GetLongJumpContext();

  Frame GetTopOfStack() const {
    return top_of_managed_stack_;
  }

  // TODO: this is here for testing, remove when we have exception unit tests
  // that use the real stack
  void SetTopOfStack(void* stack, uintptr_t pc) {
    top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(stack));
    top_of_managed_stack_pc_ = pc;
  }

  void SetTopOfStackPC(uintptr_t pc) {
    top_of_managed_stack_pc_ = pc;
  }

  // 'msg' may be NULL.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg);

  // printf-style variant; format arguments are checked by the compiler.
  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__ ((format(printf, 3, 4)));

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap);

  // This exception is special, because we need to pre-allocate an instance.
  void ThrowOutOfMemoryError();

  Frame FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const Method* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  void SetName(const char* name);

  // Static lifecycle hooks for thread-system bring-up/tear-down.
  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Number of references allocated in SIRTs on this thread
  size_t NumSirtReferences();

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj);

  // Visit every reference held in this thread's SIRT chain (GC root scan).
  void SirtVisitRoots(Heap::RootVisitor* visitor, void* arg);

  // Pop the top SIRT
  void PopSirt();

  // Convert a jobject into a Object*
  Object* DecodeJObject(jobject obj);

  // Implements java.lang.Thread.interrupted.
  // Reads and clears the interrupted flag under wait_mutex_.
  bool Interrupted() {
    MutexLock mu(*wait_mutex_);
    bool interrupted = interrupted_;
    interrupted_ = false;
    return interrupted;
  }

  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() {
    MutexLock mu(*wait_mutex_);
    return interrupted_;
  }

  // Raises the interrupted flag and wakes this thread if it is waiting on a
  // monitor; no-op if already interrupted.
  void Interrupt() {
    MutexLock mu(*wait_mutex_);
    if (interrupted_) {
      return;
    }
    interrupted_ = true;
    NotifyLocked();
  }

  void Notify() {
    MutexLock mu(*wait_mutex_);
    NotifyLocked();
  }

  // Linked list recording transitions from native to managed code
  // Saves the current managed-stack top (and PC) into 'record', pushes the
  // record onto the chain, and clears the managed-stack top.
  void PushNativeToManagedRecord(NativeToManagedRecord* record) {
    record->last_top_of_managed_stack_ = reinterpret_cast<void*>(top_of_managed_stack_.GetSP());
    record->last_top_of_managed_stack_pc_ = top_of_managed_stack_pc_;
    record->link_ = native_to_managed_record_;
    native_to_managed_record_ = record;
    top_of_managed_stack_.SetSP(NULL);
  }

  // Inverse of PushNativeToManagedRecord: restores the saved stack top/PC.
  void PopNativeToManagedRecord(const NativeToManagedRecord& record) {
    native_to_managed_record_ = record.link_;
    top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(record.last_top_of_managed_stack_));
    top_of_managed_stack_pc_ = record.last_top_of_managed_stack_pc_;
  }

  const ClassLoader* GetClassLoaderOverride() {
    // TODO: need to place the class_loader_override_ in a handle
    // DCHECK(CanAccessDirectReferences());
    return class_loader_override_;
  }

  void SetClassLoaderOverride(const ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Create the internal representation of a stack trace, that is more time
  // and space efficient to compute than the StackTraceElement[]
  jobject CreateInternalStackTrace(JNIEnv* env) const;

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);

  // Visit this thread's GC roots (exception, peer, SIRTs, stack, ...).
  // NOTE(review): exact root set is defined in the .cc file — confirm there.
  void VisitRoots(Heap::RootVisitor* visitor, void* arg);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset SuspendCountOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_));
  }

  static ThreadOffset StateOffset() {
    return ThreadOffset(OFFSETOF_VOLATILE_MEMBER(Thread, state_));
  }

  // Size of stack less any space reserved for stack overflow
  size_t GetStackSize() {
    return stack_size_ - (stack_end_ - stack_base_);
  }

  // Set the stack end to that to be used during a stack overflow
  void SetStackEndForStackOverflow() {
    // During stack overflow we allow use of the full stack
    CHECK(stack_end_ != stack_base_) << "Need to increase: kStackOverflowReservedBytes ("
      << kStackOverflowReservedBytes << ")";
    stack_end_ = stack_base_;
  }

  // Set the stack end to that to be used during regular execution
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_base_ + kStackOverflowReservedBytes;
  }

  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_) +
                        OFFSETOF_MEMBER(Frame, sp_));
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_pc_));
  }

  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  // Visits each managed frame with the given visitor, up to the first
  // native-to-managed transition.
  void WalkStack(StackVisitor* visitor) const;

 private:
  Thread();
  ~Thread();
  friend class ThreadList;  // For ~Thread.

  // Allocates and wires up the java.lang.Thread peer object.
  void CreatePeer(const char* name, bool as_daemon);
  friend class Runtime;  // For CreatePeer.

  void DumpState(std::ostream& os) const;
  void DumpStack(std::ostream& os) const;

  void Attach(const Runtime* runtime);
  // pthread entry point for threads started via Create().
  static void* CreateCallback(void* arg);

  void InitCpu();
  void InitFunctionPointers();
  void InitStackHwm();

  // Signals the wait condition if this thread is waiting on a monitor.
  // Caller must hold wait_mutex_.
  void NotifyLocked() {
    if (wait_monitor_ != NULL) {
      wait_cond_->Signal();
    }
  }

  // TLS destructor, invoked when a still-attached thread exits.
  static void ThreadExitCallback(void* arg);

  void WalkStackUntilUpCall(StackVisitor* visitor, bool include_upcall) const;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  // Native thread handle.
  pthread_t pthread_;

  // Our managed peer (an instance of java.lang.Thread).
  Object* peer_;

  // The top_of_managed_stack_ and top_of_managed_stack_pc_ fields are accessed from
  // compiled code, so we keep them early in the structure to (a) avoid having to keep
  // fixing the assembler offsets and (b) improve the chances that these will still be aligned.

  // Top of the managed stack, written out prior to the state transition from
  // kRunnable to kNative. Uses include to give the starting point for scanning
  // a managed stack when a thread is in native code.
  Frame top_of_managed_stack_;
  // PC corresponding to the call out of the top_of_managed_stack_ frame
  uintptr_t top_of_managed_stack_pc_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_;
  ConditionVariable* wait_cond_;
  // Pointer to the monitor lock we're currently waiting on (or NULL), guarded by wait_mutex_.
  Monitor* wait_monitor_;
  // Thread "interrupted" status; stays raised until queried or thrown, guarded by wait_mutex_.
  uint32_t interrupted_;
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  Object* monitor_enter_object_;

  friend class Monitor;

  RuntimeStats stats_;

  // FIXME: placeholder for the gc cardTable
  uint32_t card_table_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // Size of the stack
  size_t stack_size_;

  // The "lowest addressable byte" of the stack
  byte* stack_base_;

  // A linked list (of stack allocated records) recording transitions from
  // native to managed code.
  NativeToManagedRecord* native_to_managed_record_;

  // Top of linked list of stack indirect reference tables or NULL for none
  StackIndirectReferenceTable* top_sirt_;

  // Every thread may have an associated JNI environment
  JNIEnvExt* jni_env_;

  volatile State state_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  Runtime* runtime_;

  // The pending exception or NULL.
  Throwable* exception_;

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  const ClassLoader* class_loader_override_;

  // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // TLS key used to retrieve the VM thread object.
  static pthread_key_t pthread_key_self_;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};
Ian Rogersbdb03912011-09-14 00:55:44 -0700665
// Stream insertion for diagnostics: Dump()-style output for a Thread, and a
// readable name for a Thread::State.
std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const Thread::State& state);
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700668
Elliott Hughesad7c2a32011-08-31 11:58:10 -0700669class ScopedThreadStateChange {
670 public:
671 ScopedThreadStateChange(Thread* thread, Thread::State new_state) : thread_(thread) {
672 old_thread_state_ = thread_->SetState(new_state);
673 }
674
675 ~ScopedThreadStateChange() {
676 thread_->SetState(old_thread_state_);
677 }
678
679 private:
680 Thread* thread_;
681 Thread::State old_thread_state_;
682 DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
683};
684
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700685} // namespace art
686
687#endif // ART_SRC_THREAD_H_