/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
18#define ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
19
20#include "interpreter.h"
21
22#include <math.h>
23
24#include "base/logging.h"
25#include "class_linker-inl.h"
26#include "common_throws.h"
27#include "dex_file-inl.h"
28#include "dex_instruction-inl.h"
29#include "dex_instruction.h"
30#include "entrypoints/entrypoint_utils.h"
31#include "gc/accounting/card_table-inl.h"
32#include "invoke_arg_array_builder.h"
33#include "nth_caller_visitor.h"
34#include "mirror/art_field-inl.h"
35#include "mirror/art_method.h"
36#include "mirror/art_method-inl.h"
37#include "mirror/class.h"
38#include "mirror/class-inl.h"
39#include "mirror/object-inl.h"
40#include "mirror/object_array-inl.h"
41#include "object_utils.h"
42#include "ScopedLocalRef.h"
43#include "scoped_thread_state_change.h"
44#include "thread.h"
45#include "well_known_classes.h"
46
47using ::art::mirror::ArtField;
48using ::art::mirror::ArtMethod;
49using ::art::mirror::Array;
50using ::art::mirror::BooleanArray;
51using ::art::mirror::ByteArray;
52using ::art::mirror::CharArray;
53using ::art::mirror::Class;
54using ::art::mirror::ClassLoader;
55using ::art::mirror::IntArray;
56using ::art::mirror::LongArray;
57using ::art::mirror::Object;
58using ::art::mirror::ObjectArray;
59using ::art::mirror::ShortArray;
60using ::art::mirror::String;
61using ::art::mirror::Throwable;
62
63namespace art {
64namespace interpreter {
65
// External references to both interpreter implementations (switch-based and
// computed-goto-based).

// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
template<bool do_access_check>
extern JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh,
                                const DexFile::CodeItem* code_item,
                                ShadowFrame& shadow_frame, JValue result_register)
    NO_THREAD_SAFETY_ANALYSIS __attribute__((hot));

// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
template<bool do_access_check>
extern JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh,
                              const DexFile::CodeItem* code_item,
                              ShadowFrame& shadow_frame, JValue result_register)
    NO_THREAD_SAFETY_ANALYSIS __attribute__((hot));

// Common part of both implementations.
// Extremes of the signed 32/64-bit ranges; kMinInt/kMinLong are used below to
// detect the overflowing division cases (kMin / -1), which are UB in C++.
static const int32_t kMaxInt = std::numeric_limits<int32_t>::max();
static const int32_t kMinInt = std::numeric_limits<int32_t>::min();
static const int64_t kMaxLong = std::numeric_limits<int64_t>::max();
static const int64_t kMinLong = std::numeric_limits<int64_t>::min();

// Handles invokes performed while the runtime is not started (per the name;
// the interception logic lives in the .cc file — confirm there).
void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
                            const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame,
                            JValue* result, size_t arg_offset)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
94
// Acquires the monitor of |ref| on behalf of |self|.
// NO_THREAD_SAFETY_ANALYSIS: the matching release happens in DoMonitorExit,
// which the static analysis cannot pair across calls.
static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
  ref->MonitorEnter(self);
}
98
// Releases the monitor of |ref| on behalf of |self|; counterpart of
// DoMonitorEnter above (hence NO_THREAD_SAFETY_ANALYSIS).
static inline void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
  ref->MonitorExit(self);
}
102
// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
// Performs a method invocation; the invoke type, range-ness and access
// checking are template parameters so each combination compiles to its own
// specialization (defined in the .cc file).
template<InvokeType type, bool is_range, bool do_access_check>
bool DoInvoke(Thread* self, ShadowFrame& shadow_frame,
              const Instruction* inst, JValue* result) NO_THREAD_SAFETY_ANALYSIS;

// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
// Performs an invoke-virtual-quick[/range]; presumably the callee is
// pre-resolved in the instruction (by analogy with the *-quick field helpers
// below) — confirm in the .cc file.
template<bool is_range>
bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
                          const Instruction* inst, JValue* result)
    NO_THREAD_SAFETY_ANALYSIS;
115
116// We use template functions to optimize compiler inlining process. Otherwise,
117// some parts of the code (like a switch statement) which depend on a constant
118// parameter would not be inlined while it should be. These constant parameters
119// are now part of the template arguments.
120// Note these template functions are static and inlined so they should not be
121// part of the final object file.
122// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
123// specialization.
124template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
125static bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
126 const Instruction* inst)
127 NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
128
129template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
130static inline bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
131 const Instruction* inst) {
132 bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
133 uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
134 ArtField* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
135 find_type, Primitive::FieldSize(field_type),
136 do_access_check);
137 if (UNLIKELY(f == NULL)) {
138 CHECK(self->IsExceptionPending());
139 return false;
140 }
141 Object* obj;
142 if (is_static) {
143 obj = f->GetDeclaringClass();
144 } else {
145 obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
146 if (UNLIKELY(obj == NULL)) {
147 ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), f, true);
148 return false;
149 }
150 }
151 uint32_t vregA = is_static ? inst->VRegA_21c() : inst->VRegA_22c();
152 switch (field_type) {
153 case Primitive::kPrimBoolean:
154 shadow_frame.SetVReg(vregA, f->GetBoolean(obj));
155 break;
156 case Primitive::kPrimByte:
157 shadow_frame.SetVReg(vregA, f->GetByte(obj));
158 break;
159 case Primitive::kPrimChar:
160 shadow_frame.SetVReg(vregA, f->GetChar(obj));
161 break;
162 case Primitive::kPrimShort:
163 shadow_frame.SetVReg(vregA, f->GetShort(obj));
164 break;
165 case Primitive::kPrimInt:
166 shadow_frame.SetVReg(vregA, f->GetInt(obj));
167 break;
168 case Primitive::kPrimLong:
169 shadow_frame.SetVRegLong(vregA, f->GetLong(obj));
170 break;
171 case Primitive::kPrimNot:
172 shadow_frame.SetVRegReference(vregA, f->GetObject(obj));
173 break;
174 default:
175 LOG(FATAL) << "Unreachable: " << field_type;
176 }
177 return true;
178}
179
// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
// Fast-path instance field read for the iget-*-quick instructions: the field
// offset is encoded directly in the instruction (vC), so no field resolution
// is needed. Returns false with a pending NPE when the receiver is null.
template<Primitive::Type field_type>
static bool DoIGetQuick(Thread* self, ShadowFrame& shadow_frame,
                        const Instruction* inst)
    NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;

template<Primitive::Type field_type>
static inline bool DoIGetQuick(Thread* self, ShadowFrame& shadow_frame,
                               const Instruction* inst) {
  Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
  if (UNLIKELY(obj == NULL)) {
    // We lost the reference to the field index so we cannot get a more
    // precise exception message.
    ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
    return false;
  }
  // vC holds the raw byte offset of the field within the object.
  MemberOffset field_offset(inst->VRegC_22c());
  const bool is_volatile = false;  // iget-x-quick only on non volatile fields.
  const uint32_t vregA = inst->VRegA_22c();
  // field_type is a compile-time constant: only one case survives inlining.
  switch (field_type) {
    case Primitive::kPrimInt:
      shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetField32(field_offset, is_volatile)));
      break;
    case Primitive::kPrimLong:
      shadow_frame.SetVRegLong(vregA, static_cast<int64_t>(obj->GetField64(field_offset, is_volatile)));
      break;
    case Primitive::kPrimNot:
      shadow_frame.SetVRegReference(vregA, obj->GetFieldObject<mirror::Object*>(field_offset, is_volatile));
      break;
    default:
      LOG(FATAL) << "Unreachable: " << field_type;
  }
  return true;
}
215
// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
// Writes a vreg's value into a static or instance field; mirrors DoFieldGet.
// Returns false with a pending exception on resolution failure or a null
// receiver.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
static bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
                       const Instruction* inst)
    NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;

template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
static inline bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
                              const Instruction* inst) {
  bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
  // Static puts (21c) carry the field index in vB, instance puts (22c) in vC.
  uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
  ArtField* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
                                  find_type, Primitive::FieldSize(field_type),
                                  do_access_check);
  if (UNLIKELY(f == NULL)) {
    CHECK(self->IsExceptionPending());  // Resolution must have thrown.
    return false;
  }
  Object* obj;
  if (is_static) {
    obj = f->GetDeclaringClass();
  } else {
    obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
    if (UNLIKELY(obj == NULL)) {
      ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(),
                                              f, false);
      return false;
    }
  }
  uint32_t vregA = is_static ? inst->VRegA_21c() : inst->VRegA_22c();
  // field_type is a compile-time constant: only one case survives inlining.
  switch (field_type) {
    case Primitive::kPrimBoolean:
      f->SetBoolean(obj, shadow_frame.GetVReg(vregA));
      break;
    case Primitive::kPrimByte:
      f->SetByte(obj, shadow_frame.GetVReg(vregA));
      break;
    case Primitive::kPrimChar:
      f->SetChar(obj, shadow_frame.GetVReg(vregA));
      break;
    case Primitive::kPrimShort:
      f->SetShort(obj, shadow_frame.GetVReg(vregA));
      break;
    case Primitive::kPrimInt:
      f->SetInt(obj, shadow_frame.GetVReg(vregA));
      break;
    case Primitive::kPrimLong:
      f->SetLong(obj, shadow_frame.GetVRegLong(vregA));
      break;
    case Primitive::kPrimNot:
      f->SetObj(obj, shadow_frame.GetVRegReference(vregA));
      break;
    default:
      LOG(FATAL) << "Unreachable: " << field_type;
  }
  return true;
}
274
275// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
276// specialization.
277template<Primitive::Type field_type>
278static bool DoIPutQuick(Thread* self, ShadowFrame& shadow_frame,
279 const Instruction* inst)
280 NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
281
282template<Primitive::Type field_type>
283static inline bool DoIPutQuick(Thread* self, ShadowFrame& shadow_frame,
284 const Instruction* inst) {
285 Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
286 if (UNLIKELY(obj == NULL)) {
287 // We lost the reference to the field index so we cannot get a more
288 // precised exception message.
289 ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
290 return false;
291 }
292 MemberOffset field_offset(inst->VRegC_22c());
293 const bool is_volatile = false; // iput-x-quick only on non volatile fields.
294 const uint32_t vregA = inst->VRegA_22c();
295 switch (field_type) {
296 case Primitive::kPrimInt:
297 obj->SetField32(field_offset, shadow_frame.GetVReg(vregA), is_volatile);
298 break;
299 case Primitive::kPrimLong:
300 obj->SetField64(field_offset, shadow_frame.GetVRegLong(vregA), is_volatile);
301 break;
302 case Primitive::kPrimNot:
303 obj->SetFieldObject(field_offset, shadow_frame.GetVRegReference(vregA), is_volatile);
304 break;
305 default:
306 LOG(FATAL) << "Unreachable: " << field_type;
307 }
308 return true;
309}
310
311static inline String* ResolveString(Thread* self, MethodHelper& mh, uint32_t string_idx)
312 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
313 Class* java_lang_string_class = String::GetJavaLangString();
314 if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
315 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
316 if (UNLIKELY(!class_linker->EnsureInitialized(java_lang_string_class,
317 true, true))) {
318 DCHECK(self->IsExceptionPending());
319 return NULL;
320 }
321 }
322 return mh.ResolveString(string_idx);
323}
324
325static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
326 int32_t dividend, int32_t divisor)
327 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
328 if (UNLIKELY(divisor == 0)) {
329 ThrowArithmeticExceptionDivideByZero();
330 return false;
331 }
332 if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
333 shadow_frame.SetVReg(result_reg, kMinInt);
334 } else {
335 shadow_frame.SetVReg(result_reg, dividend / divisor);
336 }
337 return true;
338}
339
340static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
341 int32_t dividend, int32_t divisor)
342 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
343 if (UNLIKELY(divisor == 0)) {
344 ThrowArithmeticExceptionDivideByZero();
345 return false;
346 }
347 if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
348 shadow_frame.SetVReg(result_reg, 0);
349 } else {
350 shadow_frame.SetVReg(result_reg, dividend % divisor);
351 }
352 return true;
353}
354
355static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
356 int64_t dividend, int64_t divisor)
357 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
358 if (UNLIKELY(divisor == 0)) {
359 ThrowArithmeticExceptionDivideByZero();
360 return false;
361 }
362 if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
363 shadow_frame.SetVRegLong(result_reg, kMinLong);
364 } else {
365 shadow_frame.SetVRegLong(result_reg, dividend / divisor);
366 }
367 return true;
368}
369
370static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
371 int64_t dividend, int64_t divisor)
372 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
373 if (UNLIKELY(divisor == 0)) {
374 ThrowArithmeticExceptionDivideByZero();
375 return false;
376 }
377 if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
378 shadow_frame.SetVRegLong(result_reg, 0);
379 } else {
380 shadow_frame.SetVRegLong(result_reg, dividend % divisor);
381 }
382 return true;
383}
384
// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
// Implements filled-new-array[/range] (defined in the .cc file).
// Returns true on success, otherwise throws an exception and returns false.
template <bool is_range, bool do_access_check>
bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
                      Thread* self, JValue* result) NO_THREAD_SAFETY_ANALYSIS;
391
392static inline int32_t DoPackedSwitch(const Instruction* inst,
393 const ShadowFrame& shadow_frame)
394 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
395 DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH);
396 const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
397 int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t());
398 DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
399 uint16_t size = switch_data[1];
400 DCHECK_GT(size, 0);
401 const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
402 DCHECK(IsAligned<4>(keys));
403 int32_t first_key = keys[0];
404 const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]);
405 DCHECK(IsAligned<4>(targets));
406 int32_t index = test_val - first_key;
407 if (index >= 0 && index < size) {
408 return targets[index];
409 } else {
410 // No corresponding value: move forward by 3 (size of PACKED_SWITCH).
411 return 3;
412 }
413}
414
// Interprets a sparse-switch instruction: binary-searches the sorted key
// table in the instruction's payload for the tested vreg's value. Returns the
// branch offset of the matching case, or 3 (the width of SPARSE_SWITCH in
// 16-bit code units) to fall through when no key matches.
static inline int32_t DoSparseSwitch(const Instruction* inst,
                                     const ShadowFrame& shadow_frame)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
  // The payload lives at a 16-bit-unit offset (vB) from the instruction.
  const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
  int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t());
  DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
  uint16_t size = switch_data[1];
  DCHECK_GT(size, 0);
  // Payload layout: |size| sorted keys followed by |size| branch targets.
  const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
  DCHECK(IsAligned<4>(keys));
  const int32_t* entries = keys + size;
  DCHECK(IsAligned<4>(entries));
  // Binary search over the sorted key table.
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) / 2;
    int32_t foundVal = keys[mid];
    if (test_val < foundVal) {
      hi = mid - 1;
    } else if (test_val > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  // No corresponding value: move forward by 3 (size of SPARSE_SWITCH).
  return 3;
}
444
// Searches the current method's exception table for a handler covering
// |dex_pc| that matches the pending exception, reporting the outcome to the
// instrumentation. Returns the handler's dex pc, or DexFile::kDexNoIndex when
// the method must be unwound.
static inline uint32_t FindNextInstructionFollowingException(Thread* self,
                                                             ShadowFrame& shadow_frame,
                                                             uint32_t dex_pc,
                                                             SirtRef<Object>& this_object_ref,
                                                             instrumentation::Instrumentation* instrumentation)
    ALWAYS_INLINE;

static inline uint32_t FindNextInstructionFollowingException(Thread* self,
                                                             ShadowFrame& shadow_frame,
                                                             uint32_t dex_pc,
                                                             SirtRef<Object>& this_object_ref,
                                                             instrumentation::Instrumentation* instrumentation)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  self->VerifyStack();
  ThrowLocation throw_location;
  mirror::Throwable* exception = self->GetException(&throw_location);
  bool clear_exception;
  uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception->GetClass(), dex_pc,
                                                                   &clear_exception);
  if (found_dex_pc == DexFile::kDexNoIndex) {
    // No handler in this method: notify instrumentation that this frame is
    // being unwound; the caller propagates the exception.
    instrumentation->MethodUnwindEvent(self, this_object_ref.get(),
                                       shadow_frame.GetMethod(), dex_pc);
  } else {
    // Handler found: report the catch, and clear the pending exception if
    // FindCatchBlock asked for it via |clear_exception|.
    instrumentation->ExceptionCaughtEvent(self, throw_location,
                                          shadow_frame.GetMethod(),
                                          found_dex_pc, exception);
    if (clear_exception) {
      self->ClearException();
    }
  }
  return found_dex_pc;
}
477
// Aborts on an instruction the interpreter does not handle. Marked
// cold/noreturn/noinline to keep it off the hot dispatch path.
static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
    __attribute__((cold, noreturn, noinline));

static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  LOG(FATAL) << "Unexpected instruction: " << inst->DumpString(&mh.GetDexFile());
  exit(0);  // Unreachable, keep GCC happy.
}
486
487static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
488 const uint32_t dex_pc, MethodHelper& mh) {
489 const bool kTracing = false;
490 if (kTracing) {
491#define TRACE_LOG std::cerr
492 TRACE_LOG << PrettyMethod(shadow_frame.GetMethod())
493 << StringPrintf("\n0x%x: ", dex_pc)
494 << inst->DumpString(&mh.GetDexFile()) << "\n";
495 for (size_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
496 uint32_t raw_value = shadow_frame.GetVReg(i);
497 Object* ref_value = shadow_frame.GetVRegReference(i);
498 TRACE_LOG << StringPrintf(" vreg%d=0x%08X", i, raw_value);
499 if (ref_value != NULL) {
500 if (ref_value->GetClass()->IsStringClass() &&
501 ref_value->AsString()->GetCharArray() != NULL) {
502 TRACE_LOG << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\"";
503 } else {
504 TRACE_LOG << "/" << PrettyTypeOf(ref_value);
505 }
506 }
507 }
508 TRACE_LOG << "\n";
509#undef TRACE_LOG
510 }
511}
512
513} // namespace interpreter
514} // namespace art
515
516#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_