/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread.h"

#include <dynamic_annotations.h>
#include <pthread.h>
#include <sys/mman.h>

#include <algorithm>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>

#include "class_linker.h"
#include "context.h"
#include "heap.h"
#include "jni_internal.h"
#include "object.h"
#include "runtime.h"
#include "runtime_support.h"
#include "scoped_jni_thread_state.h"
#include "thread_list.h"
#include "utils.h"

namespace art {

pthread_key_t Thread::pthread_key_self_;

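// Cached java.lang.Thread / java.lang.ThreadGroup reflection objects; they are resolved once the
// ClassLinker is available, in Thread::FinishStartup().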
static Field* gThread_daemon = NULL;
static Field* gThread_group = NULL;
static Field* gThread_lock = NULL;
static Field* gThread_name = NULL;
static Field* gThread_priority = NULL;
static Field* gThread_vmData = NULL;
static Field* gThreadGroup_name = NULL;
static Method* gThread_run = NULL;

// Temporary debugging hook for compiler.
void DebugMe(Method* method, uint32_t info) {
  LOG(INFO) << "DebugMe";
  if (method != NULL)
    LOG(INFO) << PrettyMethod(method);
  LOG(INFO) << "Info: " << info;
}

}  // namespace art

// Called by generated code to throw an exception
extern "C" void artDeliverExceptionHelper(art::Throwable* exception,
                                          art::Thread* thread,
                                          art::Method** sp) {
  /*
   * exception may be NULL, in which case this routine should
   * throw NPE.  NOTE: this is a convenience for generated code,
   * which previously did the null check inline and constructed
   * and threw a NPE if NULL.  This routine is responsible for setting
   * exception_ in thread and delivering the exception.
   */
#if defined(__i386__)
  thread = art::Thread::Current();  // TODO: fix passing this in as an argument
#endif
  // Place a special frame at the TOS that will save all callee saves
  *sp = thread->CalleeSaveMethod();
  thread->SetTopOfStack(sp, 0);
  if (exception == NULL) {
    thread->ThrowNewException("Ljava/lang/NullPointerException;", "throw with null exception");
    exception = thread->GetException();
  }
  thread->DeliverException(exception);
}

namespace art {

// TODO: placeholder. Helper function to type
Class* InitializeTypeFromCode(uint32_t type_idx, Method* method) {
  /*
   * Should initialize & fix up method->dex_cache_resolved_types_[].
   * Returns initialized type.  Does not return normally if an exception
   * is thrown, but instead initiates the catch.  Should be similar to
   * ClassLinker::InitializeStaticStorageFromCode.
   */
  UNIMPLEMENTED(FATAL);
  return NULL;
}

// TODO: placeholder. Helper function to resolve virtual method
void ResolveMethodFromCode(Method* method, uint32_t method_idx) {
  /*
   * Slow-path handler on invoke virtual method path in which
   * base method is unresolved at compile-time.  Doesn't need to
   * return anything - just either ensure that
   * method->dex_cache_resolved_methods_(method_idx) != NULL or
   * throw and unwind.  The caller will restart call sequence
   * from the beginning.
   */
}

// TODO: placeholder. Helper function to alloc array for OP_FILLED_NEW_ARRAY
Array* CheckAndAllocFromCode(uint32_t type_index, Method* method, int32_t component_count) {
  /*
   * Just a wrapper around Array::AllocFromCode() that additionally
   * throws a runtime exception "bad Filled array req" for 'D' and 'J'.
   */
  UNIMPLEMENTED(WARNING) << "Need check that not 'D' or 'J'";
  return Array::AllocFromCode(type_index, method, component_count);
}

// TODO: placeholder (throw on failure)
void CheckCastFromCode(const Class* a, const Class* b) {
  DCHECK(a->IsClass());
  DCHECK(b->IsClass());
  if (b->IsAssignableFrom(a)) {
    return;
  }
  UNIMPLEMENTED(FATAL);
}

void UnlockObjectFromCode(Thread* thread, Object* obj) {
  // TODO: throw and unwind if lock not held
  // TODO: throw and unwind on NPE
  obj->MonitorExit(thread);
}

void LockObjectFromCode(Thread* thread, Object* obj) {
  obj->MonitorEnter(thread);
  // TODO: throw and unwind on failure.
}

void CheckSuspendFromCode(Thread* thread) {
  Runtime::Current()->GetThreadList()->FullSuspendCheck(thread);
}

// TODO: placeholder
void StackOverflowFromCode(Method* method) {
  Thread::Current()->SetTopOfStackPC(reinterpret_cast<uintptr_t>(__builtin_return_address(0)));
  Thread::Current()->Dump(std::cerr);
  // NOTE: to save code space, this handler needs to look up its own Thread*
  UNIMPLEMENTED(FATAL) << "Stack overflow: " << PrettyMethod(method);
}

// TODO: placeholder
void ThrowNullPointerFromCode() {
  Thread::Current()->SetTopOfStackPC(reinterpret_cast<uintptr_t>(__builtin_return_address(0)));
  Thread::Current()->Dump(std::cerr);
  // NOTE: to save code space, this handler must look up caller's Method*
  UNIMPLEMENTED(FATAL) << "Null pointer exception";
}

// TODO: placeholder
void ThrowDivZeroFromCode() {
  UNIMPLEMENTED(FATAL) << "Divide by zero";
}

// TODO: placeholder
void ThrowArrayBoundsFromCode(int32_t index, int32_t limit) {
  UNIMPLEMENTED(FATAL) << "Bound check exception, idx: " << index << ", limit: " << limit;
}

// TODO: placeholder
void ThrowVerificationErrorFromCode(int32_t src1, int32_t ref) {
  UNIMPLEMENTED(FATAL) << "Verification error, src1: " << src1 << " ref: " << ref;
}

// TODO: placeholder
void ThrowNegArraySizeFromCode(int32_t index) {
  UNIMPLEMENTED(FATAL) << "Negative array size: " << index;
}

// TODO: placeholder
void ThrowInternalErrorFromCode(int32_t errnum) {
  UNIMPLEMENTED(FATAL) << "Internal error: " << errnum;
}

// TODO: placeholder
void ThrowRuntimeExceptionFromCode(int32_t errnum) {
  UNIMPLEMENTED(FATAL) << "Internal error: " << errnum;
}

// TODO: placeholder
void ThrowNoSuchMethodFromCode(int32_t method_idx) {
  UNIMPLEMENTED(FATAL) << "No such method, idx: " << method_idx;
}

void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread) {
  thread->ThrowNewException("Ljava/lang/AbstractMethodError;",
                            "abstract method \"%s\"",
                            PrettyMethod(method).c_str());
  thread->DeliverException(thread->GetException());
}


/*
 * Temporary placeholder.  Should include run-time checks for size
 * of fill data <= size of array.  If not, throw arrayOutOfBoundsException.
 * As with other new "FromCode" routines, this should return to the caller
 * only if no exception has been thrown.
 *
 * NOTE: When dealing with a raw dex file, the data to be copied uses
 * little-endian ordering.  Require that oat2dex do any required swapping
 * so this routine can get by with a memcpy().
 *
 * Format of the data:
 *  ushort ident = 0x0300   magic value
 *  ushort width            width of each element in the table
 *  uint   size             number of elements in the table
 *  ubyte  data[size*width] table of data values (may contain a single-byte
 *                          padding at the end)
 */
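//
// Illustrative example: filling a new int[2] with {1, 2} would be described by the ushort
// sequence { 0x0300, 0x0004, 0x0002, 0x0000 } followed by the eight little-endian data bytes of
// the two ints; table[2]/table[3] hold the low/high halves of the 32-bit element count.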
void HandleFillArrayDataFromCode(Array* array, const uint16_t* table) {
  uint32_t size = (uint32_t)table[2] | (((uint32_t)table[3]) << 16);
  uint32_t size_in_bytes = size * table[1];
  if (static_cast<int32_t>(size) > array->GetLength()) {
    ThrowArrayBoundsFromCode(array->GetLength(), size);
  }
  memcpy((char*)array + art::Array::DataOffset().Int32Value(),
         (char*)&table[4], size_in_bytes);
}

/*
 * TODO: placeholder for a method that can be called by the
 * invoke-interface trampoline to unwind and handle exception.  The
 * trampoline will arrange it so that the caller appears to be the
 * callsite of the failed invoke-interface.  See comments in
 * runtime_support.S
 */
extern "C" void artFailedInvokeInterface() {
  UNIMPLEMENTED(FATAL) << "Unimplemented exception throw";
}

// See comments in runtime_support.S
extern "C" uint64_t artFindInterfaceMethodInCache(uint32_t method_idx,
                                                  Object* this_object, Method* caller_method) {
  if (this_object == NULL) {
    ThrowNullPointerFromCode();
  }
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  Method* interface_method = class_linker->ResolveMethod(method_idx, caller_method, false);
  if (interface_method == NULL) {
    UNIMPLEMENTED(FATAL) << "Could not resolve interface method. Throw error and unwind";
  }
  Method* method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
  const void* code = method->GetCode();

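  // Pack the resolved Method* and its code pointer into the single uint64_t return value: code in
  // the high 32 bits, method in the low 32 bits (this packing assumes 32-bit pointers).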
  uint32_t method_uint = reinterpret_cast<uint32_t>(method);
  uint64_t code_uint = reinterpret_cast<uint32_t>(code);
  uint64_t result = ((code_uint << 32) | method_uint);
  return result;
}

// TODO: move to more appropriate location
/*
 * Float/double conversion requires clamping to min and max of integer form.  If
 * target doesn't support this normally, use these.
 */
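// For example, with this clamping D2L(1e300) returns INT64_MAX, D2L(-1e300) returns INT64_MIN,
// and D2L(NaN) returns 0; F2L behaves the same way for floats.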
int64_t D2L(double d) {
  static const double kMaxLong = (double)(int64_t)0x7fffffffffffffffULL;
  static const double kMinLong = (double)(int64_t)0x8000000000000000ULL;
  if (d >= kMaxLong)
    return (int64_t)0x7fffffffffffffffULL;
  else if (d <= kMinLong)
    return (int64_t)0x8000000000000000ULL;
  else if (d != d)  // NaN case
    return 0;
  else
    return (int64_t)d;
}

int64_t F2L(float f) {
  static const float kMaxLong = (float)(int64_t)0x7fffffffffffffffULL;
  static const float kMinLong = (float)(int64_t)0x8000000000000000ULL;
  if (f >= kMaxLong)
    return (int64_t)0x7fffffffffffffffULL;
  else if (f <= kMinLong)
    return (int64_t)0x8000000000000000ULL;
  else if (f != f)  // NaN case
    return 0;
  else
    return (int64_t)f;
}

// Return value helper for jobject return types
static Object* DecodeJObjectInThread(Thread* thread, jobject obj) {
  return thread->DecodeJObject(obj);
}

void Thread::InitFunctionPointers() {
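  // Entry points called directly from compiled code. On ARM, the __aeabi_* symbols below are the
  // standard ARM EABI runtime helpers for integer division and float/double arithmetic and
  // conversion.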
#if defined(__arm__)
  pShlLong = art_shl_long;
  pShrLong = art_shr_long;
  pUshrLong = art_ushr_long;
  pIdiv = __aeabi_idiv;
  pIdivmod = __aeabi_idivmod;
  pI2f = __aeabi_i2f;
  pF2iz = __aeabi_f2iz;
  pD2f = __aeabi_d2f;
  pF2d = __aeabi_f2d;
  pD2iz = __aeabi_d2iz;
  pL2f = __aeabi_l2f;
  pL2d = __aeabi_l2d;
  pFadd = __aeabi_fadd;
  pFsub = __aeabi_fsub;
  pFdiv = __aeabi_fdiv;
  pFmul = __aeabi_fmul;
  pFmodf = fmodf;
  pDadd = __aeabi_dadd;
  pDsub = __aeabi_dsub;
  pDdiv = __aeabi_ddiv;
  pDmul = __aeabi_dmul;
  pFmod = fmod;
  pLdivmod = __aeabi_ldivmod;
  pLmul = __aeabi_lmul;
  pInvokeInterfaceTrampoline = art_invoke_interface_trampoline;
#endif
  pDeliverException = art_deliver_exception;
  pF2l = F2L;
  pD2l = D2L;
  pAllocFromCode = Array::AllocFromCode;
  pCheckAndAllocFromCode = CheckAndAllocFromCode;
  pAllocObjectFromCode = Class::AllocObjectFromCode;
  pMemcpy = memcpy;
  pHandleFillArrayDataFromCode = HandleFillArrayDataFromCode;
  pGet32Static = Field::Get32StaticFromCode;
  pSet32Static = Field::Set32StaticFromCode;
  pGet64Static = Field::Get64StaticFromCode;
  pSet64Static = Field::Set64StaticFromCode;
  pGetObjStatic = Field::GetObjStaticFromCode;
  pSetObjStatic = Field::SetObjStaticFromCode;
  pCanPutArrayElementFromCode = Class::CanPutArrayElementFromCode;
  pInitializeTypeFromCode = InitializeTypeFromCode;
  pResolveMethodFromCode = ResolveMethodFromCode;
  pInitializeStaticStorage = ClassLinker::InitializeStaticStorageFromCode;
  pInstanceofNonTrivialFromCode = Object::InstanceOf;
  pCheckCastFromCode = CheckCastFromCode;
  pLockObjectFromCode = LockObjectFromCode;
  pUnlockObjectFromCode = UnlockObjectFromCode;
  pFindFieldFromCode = Field::FindFieldFromCode;
  pCheckSuspendFromCode = CheckSuspendFromCode;
  pStackOverflowFromCode = StackOverflowFromCode;
  pThrowNullPointerFromCode = ThrowNullPointerFromCode;
  pThrowArrayBoundsFromCode = ThrowArrayBoundsFromCode;
  pThrowDivZeroFromCode = ThrowDivZeroFromCode;
  pThrowVerificationErrorFromCode = ThrowVerificationErrorFromCode;
  pThrowNegArraySizeFromCode = ThrowNegArraySizeFromCode;
  pThrowRuntimeExceptionFromCode = ThrowRuntimeExceptionFromCode;
  pThrowInternalErrorFromCode = ThrowInternalErrorFromCode;
  pThrowNoSuchMethodFromCode = ThrowNoSuchMethodFromCode;
  pThrowAbstractMethodErrorFromCode = ThrowAbstractMethodErrorFromCode;
  pFindNativeMethod = FindNativeMethod;
  pDecodeJObjectInThread = DecodeJObjectInThread;
  pDebugMe = DebugMe;
}

void Frame::Next() {
  size_t frame_size = GetMethod()->GetFrameSizeInBytes();
  DCHECK_NE(frame_size, 0u);
  DCHECK_LT(frame_size, 1024u);
  byte* next_sp = reinterpret_cast<byte*>(sp_) + frame_size;
  sp_ = reinterpret_cast<Method**>(next_sp);
  DCHECK(*sp_ == NULL ||
         (*sp_)->GetClass()->GetDescriptor()->Equals("Ljava/lang/reflect/Method;"));
}

uintptr_t Frame::GetReturnPC() const {
  byte* pc_addr = reinterpret_cast<byte*>(sp_) + GetMethod()->GetReturnPcOffsetInBytes();
  return *reinterpret_cast<uintptr_t*>(pc_addr);
}

uintptr_t Frame::LoadCalleeSave(int num) const {
  // Callee saves are held at the top of the frame
  Method* method = GetMethod();
  DCHECK(method != NULL);
  size_t frame_size = method->GetFrameSizeInBytes();
  byte* save_addr = reinterpret_cast<byte*>(sp_) + frame_size - ((num + 1) * kPointerSize);
#if defined(__i386__)
  save_addr -= kPointerSize;  // account for return address
#endif
  return *reinterpret_cast<uintptr_t*>(save_addr);
}

Method* Frame::NextMethod() const {
  byte* next_sp = reinterpret_cast<byte*>(sp_) + GetMethod()->GetFrameSizeInBytes();
  return *reinterpret_cast<Method**>(next_sp);
}

void* Thread::CreateCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  Runtime* runtime = Runtime::Current();

  self->Attach(runtime);

  String* thread_name = reinterpret_cast<String*>(gThread_name->GetObject(self->peer_));
  if (thread_name != NULL) {
    SetThreadName(thread_name->ToModifiedUtf8().c_str());
  }

  // Wait until it's safe to start running code. (There may have been a suspend-all
  // in progress while we were starting up.)
  runtime->GetThreadList()->WaitForGo();

  // TODO: say "hi" to the debugger.
  //if (gDvm.debuggerConnected) {
  //  dvmDbgPostThreadStart(self);
  //}

  // Invoke the 'run' method of our java.lang.Thread.
  CHECK(self->peer_ != NULL);
  Object* receiver = self->peer_;
  Method* m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(gThread_run);
  m->Invoke(self, receiver, NULL, NULL);

  // Detach.
  runtime->GetThreadList()->Unregister();

  return NULL;
}

void SetVmData(Object* managed_thread, Thread* native_thread) {
  gThread_vmData->SetInt(managed_thread, reinterpret_cast<uintptr_t>(native_thread));
}

void Thread::Create(Object* peer, size_t stack_size) {
  CHECK(peer != NULL);

  if (stack_size == 0) {
    stack_size = Runtime::Current()->GetDefaultStackSize();
  }

  Thread* native_thread = new Thread;
  native_thread->peer_ = peer;

  // Thread.start is synchronized, so we know that vmData is 0,
  // and know that we're not racing to assign it.
  SetVmData(peer, native_thread);

  pthread_attr_t attr;
  CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
  CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
  CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
  CHECK_PTHREAD_CALL(pthread_create, (&native_thread->pthread_, &attr, Thread::CreateCallback, native_thread), "new thread");
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");

  // Let the child know when it's safe to start running.
  Runtime::Current()->GetThreadList()->SignalGo(native_thread);
}

void Thread::Attach(const Runtime* runtime) {
  InitCpu();
  InitFunctionPointers();

  thin_lock_id_ = Runtime::Current()->GetThreadList()->AllocThreadId();

  tid_ = ::art::GetTid();
  pthread_ = pthread_self();

  InitStackHwm();

  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach");

  jni_env_ = new JNIEnvExt(this, runtime->GetJavaVM());

  runtime->GetThreadList()->Register(this);
}

Thread* Thread::Attach(const Runtime* runtime, const char* name, bool as_daemon) {
  Thread* self = new Thread;
  self->Attach(runtime);

  self->SetState(Thread::kRunnable);

  SetThreadName(name);

  // If we're the main thread, ClassLinker won't be created until after we're attached,
  // so that thread needs a two-stage attach. Regular threads don't need this hack.
  if (self->thin_lock_id_ != ThreadList::kMainId) {
    self->CreatePeer(name, as_daemon);
  }

  return self;
}

jobject GetWellKnownThreadGroup(JNIEnv* env, const char* field_name) {
  jclass thread_group_class = env->FindClass("java/lang/ThreadGroup");
  jfieldID fid = env->GetStaticFieldID(thread_group_class, field_name, "Ljava/lang/ThreadGroup;");
  jobject thread_group = env->GetStaticObjectField(thread_group_class, fid);
  // This will be null in the compiler (and tests), but never in a running system.
  //CHECK(thread_group != NULL) << "java.lang.ThreadGroup." << field_name << " not initialized";
  return thread_group;
}

void Thread::CreatePeer(const char* name, bool as_daemon) {
  ScopedThreadStateChange tsc(Thread::Current(), Thread::kNative);

  JNIEnv* env = jni_env_;

  const char* field_name = (GetThinLockId() == ThreadList::kMainId) ? "mMain" : "mSystem";
  jobject thread_group = GetWellKnownThreadGroup(env, field_name);
  jobject thread_name = env->NewStringUTF(name);
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  jclass c = env->FindClass("java/lang/Thread");
  jmethodID mid = env->GetMethodID(c, "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");

  jobject peer = env->NewObject(c, mid, thread_group, thread_name, thread_priority, thread_is_daemon);

  // Because we mostly run without code available (in the compiler, in tests), we
  // manually assign the fields the constructor should have set.
  // TODO: lose this.
  jfieldID fid;
  fid = env->GetFieldID(c, "group", "Ljava/lang/ThreadGroup;");
  env->SetObjectField(peer, fid, thread_group);
  fid = env->GetFieldID(c, "name", "Ljava/lang/String;");
  env->SetObjectField(peer, fid, thread_name);
  fid = env->GetFieldID(c, "priority", "I");
  env->SetIntField(peer, fid, thread_priority);
  fid = env->GetFieldID(c, "daemon", "Z");
  env->SetBooleanField(peer, fid, thread_is_daemon);

  peer_ = DecodeJObject(peer);
}

void Thread::InitStackHwm() {
  pthread_attr_t attributes;
  CHECK_PTHREAD_CALL(pthread_getattr_np, (pthread_, &attributes), __FUNCTION__);

  void* stack_base;
  size_t stack_size;
  CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, &stack_base, &stack_size), __FUNCTION__);

  if (stack_size <= kStackOverflowReservedBytes) {
    LOG(FATAL) << "attempt to attach a thread with a too-small stack (" << stack_size << " bytes)";
  }

  // stack_base is the "lowest addressable byte" of the stack.
  // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
  // to throw a StackOverflowError.
  stack_end_ = reinterpret_cast<byte*>(stack_base) + kStackOverflowReservedBytes;

  // Sanity check.
  int stack_variable;
  CHECK_GT(&stack_variable, (void*) stack_end_);

  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
}

void Thread::Dump(std::ostream& os) const {
  DumpState(os);
  DumpStack(os);
}

std::string GetSchedulerGroup(pid_t tid) {
  // /proc/<pid>/cgroup looks like this:
  // 2:devices:/
  // 1:cpuacct,cpu:/
  // We want the third field from the line whose second field contains the "cpu" token.
  std::string cgroup_file;
  if (!ReadFileToString("/proc/self/cgroup", &cgroup_file)) {
    return "";
  }
  std::vector<std::string> cgroup_lines;
  Split(cgroup_file, '\n', cgroup_lines);
  for (size_t i = 0; i < cgroup_lines.size(); ++i) {
    std::vector<std::string> cgroup_fields;
    Split(cgroup_lines[i], ':', cgroup_fields);
    std::vector<std::string> cgroups;
    Split(cgroup_fields[1], ',', cgroups);
    for (size_t i = 0; i < cgroups.size(); ++i) {
      if (cgroups[i] == "cpu") {
        return cgroup_fields[2].substr(1);  // Skip the leading slash.
      }
    }
  }
  return "";
}

void Thread::DumpState(std::ostream& os) const {
  std::string thread_name("<native thread without managed peer>");
  std::string group_name;
  int priority;
  bool is_daemon = false;

  if (peer_ != NULL) {
    String* thread_name_string = reinterpret_cast<String*>(gThread_name->GetObject(peer_));
    thread_name = (thread_name_string != NULL) ? thread_name_string->ToModifiedUtf8() : "<null>";
    priority = gThread_priority->GetInt(peer_);
    is_daemon = gThread_daemon->GetBoolean(peer_);

    Object* thread_group = gThread_group->GetObject(peer_);
    if (thread_group != NULL) {
      String* group_name_string = reinterpret_cast<String*>(gThreadGroup_name->GetObject(thread_group));
      group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>";
    }
  } else {
    // This name may be truncated, but it's the best we can do in the absence of a managed peer.
    std::string stats;
    if (ReadFileToString(StringPrintf("/proc/self/task/%d/stat", GetTid()).c_str(), &stats)) {
      size_t start = stats.find('(') + 1;
      size_t end = stats.find(')') - start;
      thread_name = stats.substr(start, end);
    }
    priority = GetNativePriority();
  }

  int policy;
  sched_param sp;
  CHECK_PTHREAD_CALL(pthread_getschedparam, (pthread_, &policy, &sp), __FUNCTION__);

  std::string scheduler_group(GetSchedulerGroup(GetTid()));
  if (scheduler_group.empty()) {
    scheduler_group = "default";
  }

  os << '"' << thread_name << '"';
  if (is_daemon) {
    os << " daemon";
  }
  os << " prio=" << priority
     << " tid=" << GetThinLockId()
     << " " << GetState() << "\n";

  int debug_suspend_count = 0;  // TODO
  os << "  | group=\"" << group_name << "\""
     << " sCount=" << suspend_count_
     << " dsCount=" << debug_suspend_count
     << " obj=" << reinterpret_cast<void*>(peer_)
     << " self=" << reinterpret_cast<const void*>(this) << "\n";
  os << "  | sysTid=" << GetTid()
     << " nice=" << getpriority(PRIO_PROCESS, GetTid())
     << " sched=" << policy << "/" << sp.sched_priority
     << " cgrp=" << scheduler_group
     << " handle=" << GetImpl() << "\n";

  // Grab the scheduler stats for this thread.
  std::string scheduler_stats;
  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", GetTid()).c_str(), &scheduler_stats)) {
    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
  } else {
    scheduler_stats = "0 0 0";
  }

  int utime = 0;
  int stime = 0;
  int task_cpu = 0;
  std::string stats;
  if (ReadFileToString(StringPrintf("/proc/self/task/%d/stat", GetTid()).c_str(), &stats)) {
    // Skip the command, which may contain spaces.
    stats = stats.substr(stats.find(')') + 2);
    // Extract the three fields we care about.
    std::vector<std::string> fields;
    Split(stats, ' ', fields);
    utime = strtoull(fields[11].c_str(), NULL, 10);
    stime = strtoull(fields[12].c_str(), NULL, 10);
    task_cpu = strtoull(fields[36].c_str(), NULL, 10);
  }

  os << "  | schedstat=( " << scheduler_stats << " )"
     << " utm=" << utime
     << " stm=" << stime
     << " core=" << task_cpu
     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
}

struct StackDumpVisitor : public Thread::StackVisitor {
  StackDumpVisitor(std::ostream& os) : os(os) {
  }

  virtual ~StackDumpVisitor() {
  }

  void VisitFrame(const Frame& frame, uintptr_t pc) {
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();

    Method* m = frame.GetMethod();
    Class* c = m->GetDeclaringClass();
    const DexFile& dex_file = class_linker->FindDexFile(c->GetDexCache());

    os << "  at " << PrettyMethod(m, false);
    if (m->IsNative()) {
      os << "(Native method)";
    } else {
      int line_number = dex_file.GetLineNumFromPC(m, m->ToDexPC(pc));
      os << "(" << c->GetSourceFile()->ToModifiedUtf8() << ":" << line_number << ")";
    }
    os << "\n";
  }

  std::ostream& os;
};

void Thread::DumpStack(std::ostream& os) const {
  StackDumpVisitor dumper(os);
  WalkStack(&dumper);
}

Thread::State Thread::SetState(Thread::State new_state) {
  Thread::State old_state = state_;
  if (old_state == new_state) {
    return old_state;
  }

  volatile void* raw = reinterpret_cast<volatile void*>(&state_);
  volatile int32_t* addr = reinterpret_cast<volatile int32_t*>(raw);

  if (new_state == Thread::kRunnable) {
    /*
     * Change our status to Thread::kRunnable.  The transition requires
     * that we check for pending suspension, because the VM considers
     * us to be "asleep" in all other states, and another thread could
     * be performing a GC now.
     *
     * The order of operations is very significant here.  One way to
     * do this wrong is:
     *
     *   GCing thread                   Our thread (in kNative)
     *   ------------                   ----------------------
     *                                  check suspend count (== 0)
     *   SuspendAllThreads()
     *   grab suspend-count lock
     *   increment all suspend counts
     *   release suspend-count lock
     *   check thread state (== kNative)
     *   all are suspended, begin GC
     *                                  set state to kRunnable
     *                                  (continue executing)
     *
     * We can correct this by grabbing the suspend-count lock and
     * performing both of our operations (check suspend count, set
     * state) while holding it, but then we would need to grab a mutex
     * on every transition to kRunnable.
     *
     * What we do instead is change the order of operations so that
     * the transition to kRunnable happens first.  If we then detect
     * that the suspend count is nonzero, we switch to kSuspended.
     *
     * Appropriate compiler and memory barriers are required to ensure
     * that the operations are observed in the expected order.
     *
     * This does create a small window of opportunity where a GC in
     * progress could observe what appears to be a running thread (if
     * it happens to look between when we set to kRunnable and when we
     * switch to kSuspended).  At worst this only affects assertions
     * and thread logging.  (We could work around it with some sort
     * of intermediate "pre-running" state that is generally treated
     * as equivalent to running, but that doesn't seem worthwhile.)
     *
     * We can also solve this by combining the "status" and "suspend
     * count" fields into a single 32-bit value.  This trades the
     * store/load barrier on transition to kRunnable for an atomic RMW
     * op on all transitions and all suspend count updates (also, all
     * accesses to status or the thread count require bit-fiddling).
     * It also eliminates the brief transition through kRunnable when
     * the thread is supposed to be suspended.  This is possibly faster
     * on SMP and slightly more correct, but less convenient.
     */
    android_atomic_acquire_store(new_state, addr);
    if (ANNOTATE_UNPROTECTED_READ(suspend_count_) != 0) {
      Runtime::Current()->GetThreadList()->FullSuspendCheck(this);
    }
  } else {
    /*
     * Not changing to Thread::kRunnable. No additional work required.
     *
     * We use a releasing store to ensure that, if we were runnable,
     * any updates we previously made to objects on the managed heap
     * will be observed before the state change.
     */
    android_atomic_release_store(new_state, addr);
  }

  return old_state;
}

void Thread::WaitUntilSuspended() {
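  // Spin until the thread leaves kRunnable, first yielding and then sleeping with a doubling
  // backoff that starts at 10ms.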
  // TODO: dalvik dropped the waiting thread's priority after a while.
  // TODO: dalvik timed out and aborted.
  useconds_t delay = 0;
  while (GetState() == Thread::kRunnable) {
    useconds_t new_delay = delay * 2;
    CHECK_GE(new_delay, delay);
    delay = new_delay;
    if (delay == 0) {
      sched_yield();
      delay = 10000;
    } else {
      usleep(delay);
    }
  }
}

void Thread::ThreadExitCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
}

void Thread::Startup() {
  // Allocate a TLS slot.
  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");

  // Double-check the TLS slot allocation.
  if (pthread_getspecific(pthread_key_self_) != NULL) {
    LOG(FATAL) << "newly-created pthread TLS slot is not NULL";
  }
}

void Thread::FinishStartup() {
  // Finish attaching the main thread.
  Thread::Current()->CreatePeer("main", false);

  // Now the ClassLinker is ready, we can find the various Class*, Field*, and Method*s we need.
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  Class* boolean_class = class_linker->FindPrimitiveClass('Z');
  Class* int_class = class_linker->FindPrimitiveClass('I');
  Class* String_class = class_linker->FindSystemClass("Ljava/lang/String;");
  Class* Thread_class = class_linker->FindSystemClass("Ljava/lang/Thread;");
  Class* ThreadGroup_class = class_linker->FindSystemClass("Ljava/lang/ThreadGroup;");
  Class* ThreadLock_class = class_linker->FindSystemClass("Ljava/lang/ThreadLock;");
  gThread_daemon = Thread_class->FindDeclaredInstanceField("daemon", boolean_class);
  gThread_group = Thread_class->FindDeclaredInstanceField("group", ThreadGroup_class);
  gThread_lock = Thread_class->FindDeclaredInstanceField("lock", ThreadLock_class);
  gThread_name = Thread_class->FindDeclaredInstanceField("name", String_class);
  gThread_priority = Thread_class->FindDeclaredInstanceField("priority", int_class);
  gThread_run = Thread_class->FindVirtualMethod("run", "()V");
  gThread_vmData = Thread_class->FindDeclaredInstanceField("vmData", int_class);
  gThreadGroup_name = ThreadGroup_class->FindDeclaredInstanceField("name", String_class);
}

void Thread::Shutdown() {
  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
}

Thread::Thread()
    : peer_(NULL),
      wait_mutex_(new Mutex("Thread wait mutex")),
      wait_cond_(new ConditionVariable("Thread wait condition variable")),
      wait_monitor_(NULL),
      interrupted_(false),
      wait_next_(NULL),
      card_table_(0),
      stack_end_(NULL),
      top_of_managed_stack_(),
      top_of_managed_stack_pc_(0),
      native_to_managed_record_(NULL),
      top_sirt_(NULL),
      jni_env_(NULL),
      state_(Thread::kUnknown),
      self_(NULL),
      runtime_(NULL),
      exception_(NULL),
      suspend_count_(0),
      class_loader_override_(NULL),
      long_jump_context_(NULL) {
}

void MonitorExitVisitor(const Object* object, void*) {
  Object* entered_monitor = const_cast<Object*>(object);
  entered_monitor->MonitorExit(Thread::Current());
}

Thread::~Thread() {
  // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
  if (jni_env_ != NULL) {
    jni_env_->monitors.VisitRoots(MonitorExitVisitor, NULL);
  }

  if (IsExceptionPending()) {
    UNIMPLEMENTED(FATAL) << "threadExitUncaughtException()";
  }

  // TODO: ThreadGroup.removeThread(this);

  if (peer_ != NULL) {
    SetVmData(peer_, NULL);
  }

  // TODO: say "bye" to the debugger.
  //if (gDvm.debuggerConnected) {
  //  dvmDbgPostThreadDeath(self);
  //}

  // Thread.join() is implemented as an Object.wait() on the Thread.lock
  // object. Signal anyone who is waiting.
  if (peer_ != NULL) {
    Thread* self = Thread::Current();
    Object* lock = gThread_lock->GetObject(peer_);
    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
    if (lock != NULL) {
      lock->MonitorEnter(self);
      lock->NotifyAll();
      lock->MonitorExit(self);
    }
  }

  delete jni_env_;
  jni_env_ = NULL;

  SetState(Thread::kTerminated);

  delete wait_cond_;
  delete wait_mutex_;

  delete long_jump_context_;
}

size_t Thread::NumSirtReferences() {
  size_t count = 0;
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->Link()) {
    count += cur->NumberOfReferences();
  }
  return count;
}

bool Thread::SirtContains(jobject obj) {
  Object** sirt_entry = reinterpret_cast<Object**>(obj);
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->Link()) {
    size_t num_refs = cur->NumberOfReferences();
    // A SIRT should always have a jobject/jclass as a native method is passed
    // in a this pointer or a class
    DCHECK_GT(num_refs, 0u);
    if ((&cur->References()[0] <= sirt_entry) &&
        (sirt_entry <= (&cur->References()[num_refs - 1]))) {
      return true;
    }
  }
  return false;
}

void Thread::PopSirt() {
  CHECK(top_sirt_ != NULL);
  top_sirt_ = top_sirt_->Link();
}

Object* Thread::DecodeJObject(jobject obj) {
  DCHECK(CanAccessDirectReferences());
  if (obj == NULL) {
    return NULL;
  }
  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
  IndirectRefKind kind = GetIndirectRefKind(ref);
  Object* result;
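  // GetIndirectRefKind() recovers the kind encoded in the reference itself; dispatch on it to
  // find which table (locals, globals, weak globals) or the SIRT the reference lives in.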
  switch (kind) {
  case kLocal:
    {
      IndirectReferenceTable& locals = jni_env_->locals;
      result = const_cast<Object*>(locals.Get(ref));
      break;
    }
  case kGlobal:
    {
      JavaVMExt* vm = Runtime::Current()->GetJavaVM();
      IndirectReferenceTable& globals = vm->globals;
      MutexLock mu(vm->globals_lock);
      result = const_cast<Object*>(globals.Get(ref));
      break;
    }
  case kWeakGlobal:
    {
      JavaVMExt* vm = Runtime::Current()->GetJavaVM();
      IndirectReferenceTable& weak_globals = vm->weak_globals;
      MutexLock mu(vm->weak_globals_lock);
      result = const_cast<Object*>(weak_globals.Get(ref));
      if (result == kClearedJniWeakGlobal) {
        // This is a special case where it's okay to return NULL.
        return NULL;
      }
      break;
    }
  case kSirtOrInvalid:
  default:
    // TODO: make stack indirect reference table lookup more efficient
    // Check if this is a local reference in the SIRT
    if (SirtContains(obj)) {
      result = *reinterpret_cast<Object**>(obj);  // Read from SIRT
    } else if (jni_env_->work_around_app_jni_bugs) {
      // Assume an invalid local reference is actually a direct pointer.
      result = reinterpret_cast<Object*>(obj);
    } else {
      result = kInvalidIndirectRefObject;
    }
  }

  if (result == NULL) {
    LOG(ERROR) << "JNI ERROR (app bug): use of deleted " << kind << ": " << obj;
    JniAbort(NULL);
  } else {
    if (result != kInvalidIndirectRefObject) {
      Heap::VerifyObject(result);
    }
  }
  return result;
}

class CountStackDepthVisitor : public Thread::StackVisitor {
 public:
  CountStackDepthVisitor() : depth_(0) {}

  virtual void VisitFrame(const Frame&, uintptr_t pc) {
    ++depth_;
  }

  int GetDepth() const {
    return depth_;
  }

 private:
  uint32_t depth_;
};

class BuildInternalStackTraceVisitor : public Thread::StackVisitor {
 public:
  explicit BuildInternalStackTraceVisitor(int depth, ScopedJniThreadState& ts) : count_(0) {
    // Allocate method trace with an extra slot that will hold the PC trace
    method_trace_ = Runtime::Current()->GetClassLinker()->
        AllocObjectArray<Object>(depth + 1);
    // Register a local reference as IntArray::Alloc may trigger GC
    local_ref_ = AddLocalReference<jobject>(ts.Env(), method_trace_);
    pc_trace_ = IntArray::Alloc(depth);
#ifdef MOVING_GARBAGE_COLLECTOR
    // Re-read after potential GC
    method_trace_ = Decode<ObjectArray<Object>*>(ts.Env(), local_ref_);
#endif
    // Save PC trace in last element of method trace, also places it into the
    // object graph.
    method_trace_->Set(depth, pc_trace_);
  }

  virtual ~BuildInternalStackTraceVisitor() {}

  virtual void VisitFrame(const Frame& frame, uintptr_t pc) {
    method_trace_->Set(count_, frame.GetMethod());
    pc_trace_->Set(count_, pc);
    ++count_;
  }

  jobject GetInternalStackTrace() const {
    return local_ref_;
  }

 private:
  // Current position down stack trace
  uint32_t count_;
  // Array of return PC values
  IntArray* pc_trace_;
  // An array of the methods on the stack, the last entry is a reference to the
  // PC trace
  ObjectArray<Object>* method_trace_;
  // Local indirect reference table entry for method trace
  jobject local_ref_;
};

void Thread::WalkStack(StackVisitor* visitor) const {
  Frame frame = GetTopOfStack();
  uintptr_t pc = top_of_managed_stack_pc_;
  // TODO: enable this CHECK after native_to_managed_record_ is initialized during startup.
  // CHECK(native_to_managed_record_ != NULL);
  NativeToManagedRecord* record = native_to_managed_record_;

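  // Walk each contiguous run of managed frames; when a run ends, hop across the
  // native-to-managed record to the previous run of managed frames.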
  while (frame.GetSP() != 0) {
    for ( ; frame.GetMethod() != 0; frame.Next()) {
      DCHECK(frame.GetMethod()->IsWithinCode(pc));
      visitor->VisitFrame(frame, pc);
      pc = frame.GetReturnPC();
    }
    if (record == NULL) {
      break;
    }
    // last_tos should return Frame instead of sp?
    frame.SetSP(reinterpret_cast<art::Method**>(record->last_top_of_managed_stack_));
    pc = record->last_top_of_managed_stack_pc_;
    record = record->link_;
  }
}

void Thread::WalkStackUntilUpCall(StackVisitor* visitor, bool include_upcall) const {
  Frame frame = GetTopOfStack();
  uintptr_t pc = top_of_managed_stack_pc_;

  if (frame.GetSP() != 0) {
    for ( ; frame.GetMethod() != 0; frame.Next()) {
      DCHECK(frame.GetMethod()->IsWithinCode(pc));
      visitor->VisitFrame(frame, pc);
      pc = frame.GetReturnPC();
    }
    if (include_upcall) {
      visitor->VisitFrame(frame, pc);
    }
  }
}

jobject Thread::CreateInternalStackTrace() const {
  // Compute depth of stack
  CountStackDepthVisitor count_visitor;
  WalkStack(&count_visitor);
  int32_t depth = count_visitor.GetDepth();

  // Transition into runnable state to work on Object*/Array*
  ScopedJniThreadState ts(jni_env_);

  // Build internal stack trace
  BuildInternalStackTraceVisitor build_trace_visitor(depth, ts);
  WalkStack(&build_trace_visitor);

  return build_trace_visitor.GetInternalStackTrace();
}

jobjectArray Thread::InternalStackTraceToStackTraceElementArray(jobject internal,
                                                                JNIEnv* env) {
  // Transition into runnable state to work on Object*/Array*
  ScopedJniThreadState ts(env);

  // Decode the internal stack trace into the depth, method trace and PC trace
  ObjectArray<Object>* method_trace =
      down_cast<ObjectArray<Object>*>(Decode<Object*>(ts.Env(), internal));
  int32_t depth = method_trace->GetLength() - 1;
  IntArray* pc_trace = down_cast<IntArray*>(method_trace->Get(depth));

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();

  // Create java_trace array and place in local reference table
  ObjectArray<StackTraceElement>* java_traces =
      class_linker->AllocStackTraceElementArray(depth);
  jobjectArray result = AddLocalReference<jobjectArray>(ts.Env(), java_traces);

  for (int32_t i = 0; i < depth; ++i) {
    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
    Method* method = down_cast<Method*>(method_trace->Get(i));
    uint32_t native_pc = pc_trace->Get(i);
    Class* klass = method->GetDeclaringClass();
    const DexFile& dex_file = class_linker->FindDexFile(klass->GetDexCache());
    std::string class_name(PrettyDescriptor(klass->GetDescriptor()));

    // Allocate element, potentially triggering GC
    StackTraceElement* obj =
        StackTraceElement::Alloc(String::AllocFromModifiedUtf8(class_name.c_str()),
                                 method->GetName(),
                                 klass->GetSourceFile(),
                                 dex_file.GetLineNumFromPC(method,
                                                           method->ToDexPC(native_pc)));
#ifdef MOVING_GARBAGE_COLLECTOR
    // Re-read after potential GC
    java_traces = Decode<ObjectArray<Object>*>(ts.Env(), result);
    method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(ts.Env(), internal));
    pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
#endif
    java_traces->Set(i, obj);
  }
  return result;
}
1170
Elliott Hughese5b0dc82011-08-23 09:59:02 -07001171void Thread::ThrowNewException(const char* exception_class_descriptor, const char* fmt, ...) {
Elliott Hughes37f7a402011-08-22 18:56:01 -07001172 std::string msg;
Elliott Hughesa5b897e2011-08-16 11:33:06 -07001173 va_list args;
1174 va_start(args, fmt);
Elliott Hughes37f7a402011-08-22 18:56:01 -07001175 StringAppendV(&msg, fmt, args);
Elliott Hughesa5b897e2011-08-16 11:33:06 -07001176 va_end(args);
Elliott Hughes37f7a402011-08-22 18:56:01 -07001177
Elliott Hughese5b0dc82011-08-23 09:59:02 -07001178 // Convert "Ljava/lang/Exception;" into JNI-style "java/lang/Exception".
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001179 CHECK_EQ('L', exception_class_descriptor[0]);
Elliott Hughese5b0dc82011-08-23 09:59:02 -07001180 std::string descriptor(exception_class_descriptor + 1);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001181 CHECK_EQ(';', descriptor[descriptor.length() - 1]);
Elliott Hughese5b0dc82011-08-23 09:59:02 -07001182 descriptor.erase(descriptor.length() - 1);
1183
1184 JNIEnv* env = GetJniEnv();
1185 jclass exception_class = env->FindClass(descriptor.c_str());
1186 CHECK(exception_class != NULL) << "descriptor=\"" << descriptor << "\"";
1187 int rc = env->ThrowNew(exception_class, msg.c_str());
1188 CHECK_EQ(rc, JNI_OK);
Elliott Hughesa5b897e2011-08-16 11:33:06 -07001189}
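
// Illustrative call (not from this file), assuming the usual Thread::Current()
// accessor and a hypothetical int value; the descriptor must use the
// "Lpackage/Class;" form that the conversion above expects:
//
//   Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;",
//                                        "unexpected value: %d", value);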
1190
Elliott Hughes79082e32011-08-25 12:07:32 -07001191void Thread::ThrowOutOfMemoryError() {
1192 UNIMPLEMENTED(FATAL);
1193}
1194
Ian Rogersbdb03912011-09-14 00:55:44 -07001195Method* Thread::CalleeSaveMethod() const {
1196 // TODO: we should only allocate this once
Ian Rogersbdb03912011-09-14 00:55:44 -07001197 Method* method = Runtime::Current()->GetClassLinker()->AllocMethod();
Ian Rogers67375ac2011-09-14 00:55:44 -07001198#if defined(__arm__)
Ian Rogersbdb03912011-09-14 00:55:44 -07001199 method->SetCode(NULL, art::kThumb2, NULL);
1200 method->SetFrameSizeInBytes(64);
1201 method->SetReturnPcOffsetInBytes(60);
Ian Rogers67375ac2011-09-14 00:55:44 -07001202 method->SetCoreSpillMask((1 << art::arm::R1) |
1203 (1 << art::arm::R2) |
1204 (1 << art::arm::R3) |
1205 (1 << art::arm::R4) |
1206 (1 << art::arm::R5) |
1207 (1 << art::arm::R6) |
1208 (1 << art::arm::R7) |
1209 (1 << art::arm::R8) |
1210 (1 << art::arm::R9) |
1211 (1 << art::arm::R10) |
1212 (1 << art::arm::R11) |
1213 (1 << art::arm::LR));
Ian Rogersbdb03912011-09-14 00:55:44 -07001214 method->SetFpSpillMask(0);
Ian Rogers67375ac2011-09-14 00:55:44 -07001215#elif defined(__i386__)
1216 method->SetCode(NULL, art::kX86, NULL);
1217 method->SetFrameSizeInBytes(32);
1218 method->SetReturnPcOffsetInBytes(28);
1219 method->SetCoreSpillMask((1 << art::x86::EBX) |
1220 (1 << art::x86::EBP) |
1221 (1 << art::x86::ESI) |
1222 (1 << art::x86::EDI));
1223 method->SetFpSpillMask(0);
1224#else
1225 UNIMPLEMENTED(FATAL);
1226#endif
Ian Rogersbdb03912011-09-14 00:55:44 -07001227 return method;
1228}
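
// Working through the ARM constants above: twelve core registers are spilled
// (R1-R11 plus LR), i.e. 48 bytes of spills inside the 64-byte frame, and the
// return PC offset of 60 is frame size - 4, placing the saved return address in
// the top word of the frame. The x86 values follow the same pattern: four
// spilled registers (16 bytes) in a 32-byte frame with the return PC at offset 28.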
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -07001229
Ian Rogersbdb03912011-09-14 00:55:44 -07001230class CatchBlockStackVisitor : public Thread::StackVisitor {
1231 public:
1232 CatchBlockStackVisitor(Class* to_find, Context* ljc)
Ian Rogers67375ac2011-09-14 00:55:44 -07001233 : found_(false), to_find_(to_find), long_jump_context_(ljc), native_method_count_(0) {
1234#ifndef NDEBUG
1235 handler_pc_ = 0xEBADC0DE;
1236 handler_frame_.SetSP(reinterpret_cast<Method**>(0xEBADF00D));
1237#endif
1238 }
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -07001239
Ian Rogersbdb03912011-09-14 00:55:44 -07001240 virtual void VisitFrame(const Frame& fr, uintptr_t pc) {
1241 if (!found_) {
Ian Rogersbdb03912011-09-14 00:55:44 -07001242 Method* method = fr.GetMethod();
Ian Rogers67375ac2011-09-14 00:55:44 -07001243 if (method == NULL) {
1244        // This is the upcall; remember the frame and the PC so that we can
1245        // long jump to them later.
1246 handler_pc_ = pc;
1247 handler_frame_ = fr;
1248 return;
Ian Rogersbdb03912011-09-14 00:55:44 -07001249 }
Ian Rogers67375ac2011-09-14 00:55:44 -07001250 uint32_t dex_pc = DexFile::kDexNoIndex;
1251 if (pc > 0) {
1252 if (method->IsNative()) {
1253 native_method_count_++;
1254 } else {
1255          // The return PC usually points at the instruction following the call.
1256          // Move it back 2 bytes so that ToDexPC resolves to the Dex PC of the
1257          // instruction containing the call rather than the instruction that
1258          // follows it.
1259 pc -= 2;
1260 dex_pc = method->ToDexPC(pc);
1261 }
1262 }
Ian Rogersbdb03912011-09-14 00:55:44 -07001263 if (dex_pc != DexFile::kDexNoIndex) {
1264 uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc);
1265 if (found_dex_pc != DexFile::kDexNoIndex) {
1266 found_ = true;
Ian Rogers67375ac2011-09-14 00:55:44 -07001267 handler_pc_ = method->ToNativePC(found_dex_pc);
1268 handler_frame_ = fr;
Ian Rogersbdb03912011-09-14 00:55:44 -07001269 }
1270 }
1271 if (!found_) {
1272        // The caller may be the handler, so fill in this frame's callee saves in the context.
1273 long_jump_context_->FillCalleeSaves(fr);
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -07001274 }
1275 }
1276 }
Ian Rogersbdb03912011-09-14 00:55:44 -07001277
1278 // Did we find a catch block yet?
1279 bool found_;
1280 // The type of the exception catch block to find
1281 Class* to_find_;
1282 // Frame with found handler or last frame if no handler found
1283 Frame handler_frame_;
Ian Rogers67375ac2011-09-14 00:55:44 -07001284 // PC to branch to for the handler
1285 uintptr_t handler_pc_;
Ian Rogersbdb03912011-09-14 00:55:44 -07001286 // Context that will be the target of the long jump
1287 Context* long_jump_context_;
Ian Rogers67375ac2011-09-14 00:55:44 -07001288 // Number of native methods passed in crawl (equates to number of SIRTs to pop)
1289 uint32_t native_method_count_;
Ian Rogersbdb03912011-09-14 00:55:44 -07001290};
1291
1292void Thread::DeliverException(Throwable* exception) {
1293 SetException(exception); // Set exception on thread
1294
1295 Context* long_jump_context = GetLongJumpContext();
1296 CatchBlockStackVisitor catch_finder(exception->GetClass(), long_jump_context);
Ian Rogers67375ac2011-09-14 00:55:44 -07001297 WalkStackUntilUpCall(&catch_finder, true);
Ian Rogersbdb03912011-09-14 00:55:44 -07001298
Ian Rogers67375ac2011-09-14 00:55:44 -07001299 // Pop any SIRT
1300 if (catch_finder.native_method_count_ == 1) {
1301 PopSirt();
Ian Rogersbdb03912011-09-14 00:55:44 -07001302 } else {
Ian Rogersad42e132011-09-17 20:23:33 -07001303    // The stack crawl is terminated by an upcall, so we expect it to have passed
1304    // at most one native method.
Ian Rogers67375ac2011-09-14 00:55:44 -07001305 DCHECK_EQ(catch_finder.native_method_count_, 0u);
Ian Rogersbdb03912011-09-14 00:55:44 -07001306 }
Ian Rogers67375ac2011-09-14 00:55:44 -07001307 long_jump_context->SetSP(reinterpret_cast<intptr_t>(catch_finder.handler_frame_.GetSP()));
1308 long_jump_context->SetPC(catch_finder.handler_pc_);
Ian Rogersbdb03912011-09-14 00:55:44 -07001309 long_jump_context->DoLongJump();
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -07001310}
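
// DeliverException hands control off via the context: callee saves are filled in
// during the walk, SetSP/SetPC point the context at the frame and PC chosen by
// the visitor (the handler, or the upcall frame when no managed handler was
// found), and DoLongJump then transfers control there rather than returning here.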
1311
Ian Rogersbdb03912011-09-14 00:55:44 -07001312Context* Thread::GetLongJumpContext() {
Elliott Hughes85d15452011-09-16 17:33:01 -07001313 Context* result = long_jump_context_;
Ian Rogersbdb03912011-09-14 00:55:44 -07001314 if (result == NULL) {
1315 result = Context::Create();
Elliott Hughes85d15452011-09-16 17:33:01 -07001316 long_jump_context_ = result;
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -07001317 }
Ian Rogersbdb03912011-09-14 00:55:44 -07001318 return result;
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -07001319}
1320
Elliott Hughes5f791332011-09-15 17:45:30 -07001321bool Thread::HoldsLock(Object* object) {
1322 if (object == NULL) {
1323 return false;
1324 }
1325 return object->GetLockOwner() == thin_lock_id_;
1326}
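
// Hypothetical use (not from this file): asserting monitor ownership before
// touching state guarded by an Object's lock, using the DCHECK macro seen above
// and an assumed Object* obj:
//
//   DCHECK(Thread::Current()->HoldsLock(obj));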
1327
Elliott Hughes038a8062011-09-18 14:12:41 -07001328bool Thread::IsDaemon() {
1329 return gThread_daemon->GetBoolean(peer_);
1330}
1331
Elliott Hughes410c0c82011-09-01 17:58:25 -07001332void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
Elliott Hughesd369bb72011-09-12 14:41:14 -07001333 if (exception_ != NULL) {
1334 visitor(exception_, arg);
1335 }
1336 if (peer_ != NULL) {
1337 visitor(peer_, arg);
1338 }
Elliott Hughes410c0c82011-09-01 17:58:25 -07001339 jni_env_->locals.VisitRoots(visitor, arg);
1340 jni_env_->monitors.VisitRoots(visitor, arg);
1341 // visitThreadStack(visitor, thread, arg);
1342 UNIMPLEMENTED(WARNING) << "some per-Thread roots not visited";
1343}
1344
Ian Rogersb033c752011-07-20 12:22:35 -07001345static const char* kStateNames[] = {
Elliott Hughes93e74e82011-09-13 11:07:03 -07001346 "Terminated",
Ian Rogersb033c752011-07-20 12:22:35 -07001347 "Runnable",
Elliott Hughes93e74e82011-09-13 11:07:03 -07001348 "TimedWaiting",
Ian Rogersb033c752011-07-20 12:22:35 -07001349 "Blocked",
1350 "Waiting",
Elliott Hughes93e74e82011-09-13 11:07:03 -07001351 "Initializing",
1352 "Starting",
Ian Rogersb033c752011-07-20 12:22:35 -07001353 "Native",
Elliott Hughes93e74e82011-09-13 11:07:03 -07001354 "VmWait",
1355 "Suspended",
Ian Rogersb033c752011-07-20 12:22:35 -07001356};
1357std::ostream& operator<<(std::ostream& os, const Thread::State& state) {
Elliott Hughes93e74e82011-09-13 11:07:03 -07001358 int int_state = static_cast<int>(state);
1359 if (state >= Thread::kTerminated && state <= Thread::kSuspended) {
1360 os << kStateNames[int_state];
Ian Rogersb033c752011-07-20 12:22:35 -07001361 } else {
Elliott Hughes93e74e82011-09-13 11:07:03 -07001362 os << "State[" << int_state << "]";
Ian Rogersb033c752011-07-20 12:22:35 -07001363 }
1364 return os;
1365}
1366
Elliott Hughes330304d2011-08-12 14:28:05 -07001367std::ostream& operator<<(std::ostream& os, const Thread& thread) {
1368 os << "Thread[" << &thread
Elliott Hughese27955c2011-08-26 15:21:24 -07001369 << ",pthread_t=" << thread.GetImpl()
1370 << ",tid=" << thread.GetTid()
Elliott Hughesdcc24742011-09-07 14:02:44 -07001371 << ",id=" << thread.GetThinLockId()
Elliott Hughes8daa0922011-09-11 13:46:25 -07001372 << ",state=" << thread.GetState()
1373 << ",peer=" << thread.GetPeer()
1374 << "]";
Elliott Hughes330304d2011-08-12 14:28:05 -07001375 return os;
1376}
1377
Elliott Hughes8daa0922011-09-11 13:46:25 -07001378} // namespace art